import numpy as np
import matplotlib.pyplot as plt


def nn(X, y, hidden_dim=10, learning_rate=0.01, epochs=100, debug=False):
    input_dim = X.shape[1]
    output_dim = y.shape[1]

    # Make our model: two randomly initialised weight matrices
    model = dict(
        w0=np.random.randn(input_dim, hidden_dim),
        w1=np.random.randn(hidden_dim, output_dim)
    )
    losses = []

    def sigmoid(x, derive=False):
        # When derive=True, x is expected to already be a sigmoid output,
        # so x * (1 - x) is the derivative of the sigmoid at that point.
        if derive:
            return x * (1 - x)
        return 1 / (1 + np.exp(-x))

    def MSE(y, Y):
        return np.mean((y - Y) ** 2)

    def run(layer0, model):
        # Forward pass through the hidden and output layers
        layer1 = sigmoid(np.dot(layer0, model['w0']))
        layer2 = sigmoid(np.dot(layer1, model['w1']))
        return layer1, layer2

    def train_step(model):
        ## Forward
        layer1, layer2 = run(X, model)

        ## Backprop
        l2_error = layer2 - y
        l2_delta = l2_error * sigmoid(layer2, derive=True)
        l1_error = l2_delta.dot(model['w1'].T)
        l1_delta = l1_error * sigmoid(layer1, derive=True)

        ## Store the error for plotting
        loss = MSE(layer2, y) / 2
        losses.append(loss)

        ## Update weights
        model['w1'] -= learning_rate * layer1.T.dot(l2_delta)
        model['w0'] -= learning_rate * X.T.dot(l1_delta)
        return model

    for i in range(epochs):
        model = train_step(model)

    if debug:
        plt.plot(losses)
        plt.xlabel("Epoch")
        plt.ylabel("Loss")

    ## Final prediction
    layer1, layer2 = run(X, model)
    return layer2


y_pred = nn(X, y, debug=True, epochs=1000)
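The gist never defines X and y, so the final call assumes they already exist in scope. A minimal sketch of inputs that satisfy the shape requirements (both must be 2-D arrays, since the function reads X.shape[1] and y.shape[1]) is a small hypothetical XOR-style dataset:

import numpy as np

# Hypothetical toy data, not part of the original gist:
# four 2-feature samples and their XOR labels as a column vector.
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=float)
y = np.array([[0], [1], [1], [0]], dtype=float)

y_pred = nn(X, y, debug=True, epochs=1000)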