import numpy as np

X = np.array([[0,0,1],[0,1,1],[1,0,1],[0,1,0],[1,0,0],[1,1,1],[0,0,0]])
y = np.array([[0],[1],[1],[1],[1],[0],[0]])

def sigmoid(s):
    return 1/(1+np.exp(-s))

def sigmoid_prime(z):
    """Derivative of the sigmoid function."""
    return sigmoid(z)*(1-sigmoid(z))

def quadratic_cost(true_values, predictions):
    """Derivative of the quadratic cost with respect to the predictions,
    i.e. (predictions - true_values); used as dC/dS in backprop."""
    return predictions - true_values
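
# A small sanity check of the helpers above (my addition, not in the original
# gist): sigmoid(0) is exactly 0.5 and its derivative there is 0.25.
assert np.isclose(sigmoid(0.0), 0.5)
assert np.isclose(sigmoid_prime(0.0), 0.25)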

class InputLayer:
    def __init__(self, input_data):
        self.shape = input_data.shape
        # Expose the input as this layer's activation S, so the layer below
        # can read upper_layer.S uniformly in feedforward and backprop.
        self.S = input_data

class HiddenLayer:
    def __init__(self, nodes, activation_function, activation_function_derivative, upper_layer, learning_rate):
        self.activation_function = activation_function
        self.activation_function_derivative = activation_function_derivative
        self.upper_layer = upper_layer
        # Weights initialized uniformly in [-1, 1); one bias per node.
        self.weights = 2 * np.random.random((self.upper_layer.shape[1], nodes)) - 1
        self.bias = np.random.random((1, nodes))
        self.learning_rate = learning_rate
        self.shape = (self.upper_layer.shape[0], self.weights.shape[1])
    def add_lower_layer(self, l):
        self.lower_layer = l
    def feedforward(self):
        self.z = np.dot(self.upper_layer.S, self.weights) + self.bias
        self.S = self.activation_function(self.z)
    def backprop(self):
        # Tricky part. This is backprop: pull the error back through the lower
        # layer's weights, then scale by the activation derivative at this layer.
        self.delta = np.dot(self.lower_layer.delta, self.lower_layer.weights.T) * self.activation_function_derivative(self.z)
        self.harp_b = self.delta.sum(axis=0, keepdims=True)
        self.harp_w = np.dot(self.upper_layer.S.T, self.delta)
        # Gradient descent: step *against* the gradient, averaged over samples.
        self.weights = self.weights - (self.learning_rate/self.delta.shape[0]) * self.harp_w
        self.bias = self.bias - (self.learning_rate/self.delta.shape[0]) * self.harp_b
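
# The equations the two backprop methods implement (my summary, not part of
# the original gist). With S the activations, z the pre-activations, and
# "harp" standing in for the nabla symbol:
#   output layer:  delta = dC/dS * sigmoid'(z)            (dC/dS = S - y here)
#   hidden layer:  delta = (delta_below @ W_below.T) * sigmoid'(z)
#   gradients:     harp_w = S_above.T @ delta,  harp_b = delta summed over samples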

class OutputLayer:
    def __init__(self, nodes, activation_function, activation_function_derivative, upper_layer, learning_rate, cost_function):
        self.activation_function = activation_function
        self.upper_layer = upper_layer
        self.cost_function = cost_function
        self.activation_function_derivative = activation_function_derivative
        self.weights = 2 * np.random.random((self.upper_layer.shape[1], nodes)) - 1
        self.bias = np.random.random((1, nodes))
        self.learning_rate = learning_rate
        self.shape = (self.upper_layer.shape[0], self.weights.shape[1])
    def feedforward(self):
        self.z = np.dot(self.upper_layer.S, self.weights) + self.bias
        self.S = self.activation_function(self.z)
    def backprop(self, y):
        self.delta = self.cost_function(y, self.S) * self.activation_function_derivative(self.z)
        # uses harp because the word nabla weirds me out. nabla is the ∇
        self.harp_b = self.delta.sum(axis=0, keepdims=True)
        self.harp_w = np.dot(self.upper_layer.S.T, self.delta)
        self.weights = self.weights - (self.learning_rate/self.delta.shape[0]) * self.harp_w
        self.bias = self.bias - (self.learning_rate/self.delta.shape[0]) * self.harp_b

####
####
i = InputLayer(X)
l = HiddenLayer(4, sigmoid, sigmoid_prime, i, 0.1)
l2 = OutputLayer(1, sigmoid, sigmoid_prime, l, 0.1, quadratic_cost)
l.add_lower_layer(l2)
for epoch in range(1000):
    l.feedforward()
    l2.feedforward()
    l2.backprop(y)
    l.backprop()
# Print the sigmoid outputs, i.e. the network's predictions.
print(l2.S)
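
# A quick check of the trained network (my addition, not in the original gist):
# threshold the sigmoid outputs at 0.5 and compare against the labels.
predictions = np.round(l2.S)
print("training accuracy:", np.mean(predictions == y))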