@frizz925
Created September 16, 2017 07:35
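A minimal two-layer feed-forward neural network in plain NumPy, trained with backpropagation to learn XOR.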
import numpy as np
np.random.seed(1)
# XOR truth table inputs; the third column is a constant 1
inputs = np.array([
    [0, 0, 1],
    [0, 1, 1],
    [1, 0, 1],
    [1, 1, 1]
])
# expected outputs (XOR of the first two columns), as a 4x1 column vector
outputs = np.vstack([0, 1, 1, 0])
inputSize = 3
hiddenSize = 3
outputSize = 1
class Layer:
    """Container for one fully connected layer's parameters and cached activations."""
    def __init__(self, weights, biases):
        self.weights = weights
        self.biases = biases
        self.outputs = None
        self.inputs = None
# initialize weights and biases uniformly in [-1, 1)
W1 = 2 * np.random.random((inputSize, hiddenSize)) - 1
b1 = 2 * np.random.random(hiddenSize) - 1
W2 = 2 * np.random.random((hiddenSize, outputSize)) - 1
b2 = 2 * np.random.random(outputSize) - 1
layers = [
    Layer(W1, b1),
    Layer(W2, b2)
]
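# two fully connected layers: 3 inputs -> 3 hidden units -> 1 output,
# each followed by a sigmoid activation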
def sigmoid(x):
    return 1 / (1 + np.exp(-x))

# derivative of the sigmoid expressed in terms of its output:
# for s = sigmoid(x), ds/dx = s * (1 - s)
def desigmoid(x):
    return x * (1 - x)
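# quick sanity check of the derivative identity: sigmoid(0) = 0.5,
# and the sigmoid's slope is steepest there, at 0.25
assert np.isclose(desigmoid(sigmoid(0)), 0.25)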
def forwardPropagate(inputs, layer):
    # cache inputs and activations for use during backpropagation
    layer.inputs = inputs
    layer.outputs = sigmoid(np.dot(inputs, layer.weights) + layer.biases)
    return layer.outputs
def backwardPropagate(layer, error):
    # chain rule: scale the incoming error by the local sigmoid gradient
    delta = error * desigmoid(layer.outputs)
    return delta
def meanSquareError(predicted, outputs):
    # per-sample squared error: one loss value per input row
    return np.sum(0.5 * (outputs - predicted)**2, axis=1)
def predict(inputs):
    # forward propagate
    predicted = inputs
    for layer in layers:
        predicted = forwardPropagate(predicted, layer)
    return predicted
def train(predicted, expected):
    # back propagate, walking the layers in reverse order
    error = expected - predicted
    for layer in layers[::-1]:
        delta = backwardPropagate(layer, error)
        # propagate the error to the previous layer before updating the weights
        error = delta.dot(layer.weights.T)
        # gradient step with an implicit learning rate of 1
        layer.weights += layer.inputs.T.dot(delta)
        # the bias gradient is the column sum of delta
        layer.biases += delta.sum(axis=0)
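# Sign check: with E = 0.5 * (expected - predicted)**2 (see meanSquareError),
# dE/dpredicted = -(expected - predicted). Defining error = expected - predicted
# therefore makes delta = -dE/dz, so "weights += inputs.T.dot(delta)" is a
# gradient descent step on E with an implicit learning rate of 1.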
for i in range(60000):
    predicted = predict(inputs)
    loss = meanSquareError(predicted, outputs)
    if i % 10000 == 0:
        print("Error:", np.round(loss, 2))
    train(predicted, outputs)
print("Inputs:", inputs)
print("Predicted:", predict(inputs))
print("Expected:", outputs)