Word2Vec SkipGrams faulty code
""" | |
A source code converted from: | |
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/udacity/5_word2vec.ipynb | |
Into single script. But somehow, this implementation doesnt' show any improvement after 100000 iterations. | |
""" | |
import os
import math
import random
import zipfile
import collections
from pathlib import Path

from six.moves import range
from six.moves.urllib.request import urlretrieve
import numpy as np
import tensorflow as tf

DATA_ROOT = Path('~').joinpath('data').expanduser()
TEXT_DATA = DATA_ROOT.joinpath('text8.zip')
VOCABULARY_SIZE = 50000
def print_line(char='-', length=80):
    print(char * length)
def maybe_download(download_path,
                   expected_bytes,
                   url='http://mattmahoney.net/dc/'):
    """Download a file if not present, and make sure it's the right size."""
    filename = os.path.basename(download_path)
    if not os.path.exists(download_path):
        filename, _ = urlretrieve(url + filename, download_path)
    statinfo = os.stat(download_path)
    if statinfo.st_size == expected_bytes:
        print('Found and verified %s' % download_path)
    else:
        print(statinfo.st_size)
        raise Exception(
            'Failed to verify %s. Can you get to it with a browser?' % filename)
    return download_path
def read_data(filename):
    """Extract the first file enclosed in a zip file as a list of words."""
    with zipfile.ZipFile(filename) as f:
        first_name, *_ = f.namelist()
        data = tf.compat.as_str(f.read(first_name)).split()
    return data
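# read_data returns the corpus as one flat list of word tokens; for the text8
# archive this is roughly 17 million lowercase words.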
def build_dataset(words, vocabulary_size=VOCABULARY_SIZE):
    """Generates a dataset prepared for embeddings training."""
    count = [['UNK', -1]]
    count.extend(collections.Counter(words).most_common(vocabulary_size - 1))
    dictionary = dict()
    for word, _ in count:
        dictionary[word] = len(dictionary)
    data = list()
    unk_count = 0
    for word in words:
        if word in dictionary:
            index = dictionary[word]
        else:
            index = 0  # dictionary['UNK']
            unk_count = unk_count + 1
        data.append(index)
    count[0][1] = unk_count
    reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
    return data, count, dictionary, reverse_dictionary
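# Illustrative toy example (values assumed, not taken from text8): for
# words = ['the', 'quick', 'fox', 'the'] and a large enough vocabulary_size,
#   count      -> [['UNK', 0], ('the', 2), ('quick', 1), ('fox', 1)]
#   dictionary -> {'UNK': 0, 'the': 1, 'quick': 2, 'fox': 3}
#   data       -> [1, 2, 3, 1]   (the corpus re-encoded as dictionary indices)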
class BatchGenerator:

    def __init__(self, data, data_index=0):
        self.data = data
        self.data_index = data_index

    def generate_batch(self, batch_size, num_skips, skip_window):
        assert batch_size % num_skips == 0
        assert num_skips <= 2 * skip_window
        batch = np.ndarray(shape=(batch_size), dtype=np.int32)
        labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
        span = 2 * skip_window + 1  # [ skip_window target skip_window ]
        buffer = collections.deque(maxlen=span)
        data, data_index = self.data, self.data_index
        for _ in range(span):
            buffer.append(data[data_index])
            data_index = (data_index + 1) % len(data)
        for i in range(batch_size // num_skips):
            target = skip_window  # target label at the center of the buffer
            targets_to_avoid = [skip_window]
            for j in range(num_skips):
                while target in targets_to_avoid:
                    target = random.randint(0, span - 1)
                targets_to_avoid.append(target)
                batch[i * num_skips + j] = buffer[skip_window]
                labels[i * num_skips + j, 0] = buffer[target]
            buffer.append(data[data_index])
            data_index = (data_index + 1) % len(data)
        # Persist the cursor so the next call continues where this one stopped;
        # without this write-back every batch starts from the same position.
        self.data_index = data_index
        return batch, labels
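# With skip_window=1 and num_skips=2, each centre word is emitted twice, paired
# (in random order) with its left and right neighbour, and the window then slides
# one word to the right, e.g. (w1, w0), (w1, w2), (w2, w1), (w2, w3), ...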
def main():
    DATA_ROOT.mkdir(parents=True, exist_ok=True)
    path = maybe_download(TEXT_DATA, 31344016)
    words = read_data(path)
    print('Data size', len(words))
    print_line()

    data, count, dictionary, reverse_dictionary = build_dataset(words)
    print('Most common words (+UNK)', count[:5])
    print('Sample data', data[:10])
    print('Data size:', len(data))
    print_line()
    del words

    gen = BatchGenerator(data)
    print('Data:', [reverse_dictionary[di] for di in data[:8]])
    for num_skips, skip_window in [(2, 1), (4, 2)]:
        gen.data_index = 0
        batch, labels = gen.generate_batch(
            batch_size=8, num_skips=num_skips, skip_window=skip_window)
        print('\nwith num_skips = %d and skip_window = %d:' %
              (num_skips, skip_window))
        print('\tbatch:', [reverse_dictionary[bi] for bi in batch])
        print('\tlabels:', [reverse_dictionary[li] for li in labels.reshape(8)])
    batch_size = 128
    embedding_size = 128
    vocabulary_size = VOCABULARY_SIZE
    valid_size = 16
    valid_window = 100
    valid_examples = np.array(random.sample(range(valid_window), valid_size))
    num_sampled = 64

    graph = tf.Graph()
    with graph.as_default(), tf.device('/cpu:0'):
        train_dataset = tf.placeholder(tf.int32, shape=[batch_size])
        train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
        valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
        embeddings = tf.Variable(
            tf.random_uniform(
                [vocabulary_size, embedding_size], -1.0, 1.0))
        softmax_weights = tf.Variable(
            tf.truncated_normal(
                [vocabulary_size, embedding_size],
                stddev=1.0 / math.sqrt(embedding_size)))
        softmax_biases = tf.Variable(tf.zeros([vocabulary_size]))
        embed = tf.nn.embedding_lookup(embeddings, train_dataset)
        loss = tf.reduce_mean(
            tf.nn.sampled_softmax_loss(weights=softmax_weights,
                                       biases=softmax_biases,
                                       inputs=embed,
                                       labels=train_labels,
                                       num_sampled=num_sampled,
                                       num_classes=vocabulary_size))
        optimizer = tf.train.AdagradOptimizer(1.0).minimize(loss)
        norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
        normalized_embeddings = embeddings / norm
        valid_embeddings = tf.nn.embedding_lookup(normalized_embeddings, valid_dataset)
        similarity = tf.matmul(valid_embeddings, tf.transpose(normalized_embeddings))
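    # sampled_softmax_loss approximates the full 50,000-way softmax by scoring the
    # true context word against num_sampled randomly drawn negative classes. Because
    # each row of normalized_embeddings has unit L2 norm, the matmul above yields
    # cosine similarities between every validation word and the whole vocabulary.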
    skip_window = 1
    num_skips = 2
    num_steps = 100001
    report_freq = 10000
    top_k = 5

    with tf.Session(graph=graph) as session:
        tf.global_variables_initializer().run()
        print('Initialized')
        average_loss = 0
        gen.data_index = 0
        for step in range(num_steps):
            batch_x, batch_y = gen.generate_batch(batch_size=batch_size,
                                                  num_skips=num_skips,
                                                  skip_window=skip_window)
            feed = {train_dataset: batch_x, train_labels: batch_y}
            _, l = session.run([optimizer, loss], feed_dict=feed)
            average_loss += l
            if (step % report_freq) == 0:
                print_line()
                if step > 0:
                    average_loss = average_loss / report_freq
                print('Average loss at step {:>6}: {:2.6f}'
                      .format(step, average_loss))
                average_loss = 0
                print('Nearest words:')
                sim = similarity.eval()
                for i in range(valid_size):
                    valid_word = reverse_dictionary[valid_examples[i]]
                    nearest = (-sim[i, :]).argsort()[1:(top_k + 1)]
                    words = [reverse_dictionary[nearest[k]] for k in range(top_k)]
                    joined = ', '.join(words)
                    print('[{:>15}]: {}'.format(valid_word.upper(), joined))
        final_embeddings = normalized_embeddings.eval()
        print('Final embeddings:')
        print(final_embeddings)
if __name__ == '__main__':
    main()
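# A minimal sanity check (a sketch, not part of the original gist): with the
# cursor persisted in generate_batch, two consecutive calls should yield
# different batches because the generator walks forward through the corpus.
#
#     gen = BatchGenerator(list(range(100)))
#     b1, _ = gen.generate_batch(batch_size=8, num_skips=2, skip_window=1)
#     b2, _ = gen.generate_batch(batch_size=8, num_skips=2, skip_window=1)
#     assert not np.array_equal(b1, b2)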