# Two-layer network learning XOR in PyTorch (gist by @starhopp3r, created April 4, 2018)
import torch
from torch.autograd import Variable
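# Note: Variable is a no-op wrapper since PyTorch 0.4; plain tensors with
# requires_grad=True behave identically. It is kept here to match the original gist.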
# Datatype of tensors
dtype = torch.FloatTensor
# Hidden nodes
hidden_nodes = 10
# XOR truth table: inputs and targets
x_logic = torch.IntTensor([[0, 0], [0, 1], [1, 0], [1, 1]])
y_logic = torch.IntTensor([[0], [1], [1], [0]])
X = Variable(x_logic.type(dtype), requires_grad=False)
y = Variable(y_logic.type(dtype), requires_grad=False)
# Weights
layer_1_w = torch.rand(X.shape[1], hidden_nodes).type(dtype)
w1 = Variable(layer_1_w, requires_grad=True)
layer_2_w = torch.rand(w1.shape[1], y.shape[1]).type(dtype)
w2 = Variable(layer_2_w, requires_grad=True)
# Learning rate
learning_rate = 1e-2
for i in range(1000):
    # Forward pass: linear -> ReLU -> linear
    y_pred = X.mm(w1).clamp(min=0).mm(w2)
    # Sum-of-squares loss
    loss = (y_pred - y).pow(2).sum()
    print(i, loss.item())  # loss.data[0] stopped working in PyTorch 0.4; use .item()
    # Backward pass computes gradients for w1 and w2
    loss.backward()
    # Gradient-descent step on .data so the update itself isn't tracked by autograd
    w1.data -= learning_rate * w1.grad.data
    w2.data -= learning_rate * w2.grad.data
    # Zero the gradients before the next iteration
    w1.grad.data.zero_()
    w2.grad.data.zero_()
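
# A minimal sanity check after training (not in the original gist): run one
# forward pass without tracking gradients and print the predictions for the
# four XOR inputs. If training converged, the values should move toward
# [0, 1, 1, 0]; with this initialization and no bias terms, convergence is
# not guaranteed on every run.
with torch.no_grad():
    final_pred = X.mm(w1).clamp(min=0).mm(w2)
print(final_pred)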