@mbednarski
Created March 6, 2018 23:27
import numpy as np
import torch
from torch.autograd import Variable
import torch.nn.functional as F

# Assumes vocabulary_size, idx_pairs (center/context word index pairs) and
# get_input_layer (one-hot encoder) are defined earlier in the accompanying post.
embedding_dims = 5
W1 = Variable(torch.randn(embedding_dims, vocabulary_size).float(), requires_grad=True)
W2 = Variable(torch.randn(vocabulary_size, embedding_dims).float(), requires_grad=True)
num_epochs = 100
learning_rate = 0.001

for epo in range(num_epochs):
    loss_val = 0
    for data, target in idx_pairs:
        x = Variable(get_input_layer(data)).float()  # one-hot vector for the center word
        y_true = Variable(torch.from_numpy(np.array([target])).long())

        # Forward pass: project the one-hot input into the embedding space, then to vocabulary scores
        z1 = torch.matmul(W1, x)
        z2 = torch.matmul(W2, z1)

        log_softmax = F.log_softmax(z2, dim=0)
        loss = F.nll_loss(log_softmax.view(1, -1), y_true)
        loss_val += loss.item()  # .item() replaces the deprecated loss.data[0]

        # Backward pass and manual SGD update of both weight matrices
        loss.backward()
        W1.data -= learning_rate * W1.grad.data
        W2.data -= learning_rate * W2.grad.data

        # Reset gradients so they do not accumulate across training pairs
        W1.grad.data.zero_()
        W2.grad.data.zero_()

    if epo % 10 == 0:
        print(f'Loss at epo {epo}: {loss_val/len(idx_pairs)}')
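
# A minimal sketch of how the learned weights could be read out as word vectors
# after training. word2idx (a word -> index mapping) is assumed to have been
# built earlier alongside idx_pairs; it is not defined in this snippet.
def get_word_vector(word):
    idx = word2idx[word]
    # Each column of W1 is the embedding of the word with that index,
    # since the forward pass computes z1 = W1 @ one_hot(idx).
    return W1.data[:, idx].numpy()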