Word2Vec with Tensorflow on GPU
import math

import tensorflow as tf

# hyperparameters are assumed to be defined earlier, as in the notebook this
# snippet comes from: batch_size, vocabulary_size, embedding_size,
# num_sampled, valid_examples

graph = tf.Graph()

with graph.as_default():
    with graph.device('/gpu:0'):
        # input data
        train_dataset = tf.placeholder(tf.int32, shape=[batch_size])
        train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
        valid_dataset = tf.constant(valid_examples, dtype=tf.int32)

        # variables
        embeddings = tf.Variable(
            tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
        softmax_weights = tf.Variable(
            tf.truncated_normal([vocabulary_size, embedding_size],
                                stddev=1.0 / math.sqrt(embedding_size)))
        softmax_biases = tf.Variable(tf.zeros([vocabulary_size]))

        # cosine similarity between the validation words and all embeddings
        norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
        normalized_embeddings = embeddings / norm
        valid_embeddings = tf.nn.embedding_lookup(normalized_embeddings, valid_dataset)
        similarity = tf.matmul(valid_embeddings, tf.transpose(normalized_embeddings))

        # model: look up embeddings for the inputs
        embed = tf.nn.embedding_lookup(embeddings, train_dataset)

        # compute the softmax loss, using a sample of the negative labels each time
        loss = tf.reduce_mean(
            tf.nn.sampled_softmax_loss(softmax_weights, softmax_biases, embed,
                                       train_labels, num_sampled, vocabulary_size))

        optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)
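The gist only defines the graph. Below is a minimal training-loop sketch, not part of the original snippet: it assumes a generate_batch(batch_size) helper that yields (inputs, labels) pairs, and the num_steps value and reporting interval are illustrative. allow_soft_placement lets TensorFlow fall back to the CPU for ops that have no GPU kernel.

# hypothetical training loop for the graph above (TensorFlow 1.x style);
# generate_batch and num_steps are assumptions, not from the original gist
num_steps = 100001

with tf.Session(graph=graph,
                config=tf.ConfigProto(allow_soft_placement=True)) as session:
    tf.initialize_all_variables().run()
    average_loss = 0.0

    for step in range(num_steps):
        batch_data, batch_labels = generate_batch(batch_size)
        feed_dict = {train_dataset: batch_data, train_labels: batch_labels}

        # one gradient-descent step on the sampled softmax loss
        _, l = session.run([optimizer, loss], feed_dict=feed_dict)
        average_loss += l

        if step > 0 and step % 2000 == 0:
            print('average loss at step %d: %f' % (step, average_loss / 2000))
            average_loss = 0.0

    # unit-length embeddings, e.g. for nearest-neighbour lookups via `similarity`
    final_embeddings = normalized_embeddings.eval()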
Hello, can you share the complete code?
I am trying to use this snippet in my code, but it still gives me errors.