Created
March 5, 2020 05:50
-
-
Save Eligijus112/97626e60f2d4a27e0d2298cd4adfd3be to your computer and use it in GitHub Desktop.
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Deep learning:
# FIX: `Input` is not exported by `keras.models` — importing it from there
# raises ImportError on standard Keras 2 / tf.keras. It lives in `keras.layers`.
from keras.layers import Dense, Input
from keras.models import Model

# Defining the size of the embedding (dimensionality of each word vector).
embed_size = 2

# Defining the neural network: a word2vec-style autoencoder — the one-hot
# input is squeezed through a small linear bottleneck (the embedding layer)
# and expanded back to a softmax over the vocabulary.
# NOTE(review): X, Y (one-hot matrices), `words` and `unique_word_dict`
# (word -> row-index lookup) are assumed to be defined earlier in the
# tutorial this snippet belongs to — confirm against the full source.
inp = Input(shape=(X.shape[1],))
x = Dense(units=embed_size, activation='linear')(inp)
x = Dense(units=Y.shape[1], activation='softmax')(x)
model = Model(inputs=inp, outputs=x)
model.compile(loss='categorical_crossentropy', optimizer='adam')

# Optimizing the network weights
model.fit(
    x=X,
    y=Y,
    batch_size=256,
    epochs=1000
)

# Obtaining the weights from the neural network.
# These are the so called word embeddings.
# get_weights()[0] is the input layer's kernel: one row per vocabulary word.
weights = model.get_weights()[0]

# Creating a dictionary to store the embeddings in. The key is a unique word
# and the value is the numeric vector (dict comprehension replaces the
# original loop-and-update, with identical resulting contents).
embedding_dict = {word: weights[unique_word_dict.get(word)] for word in words}
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment