Backpropagation in Python
from random import random

from numpy import array, tanh, transpose, vectorize


class Network:
    """ Builds a neural network
    """

    def __init__(self, inputs, outputs, hlayers=None, activation=tanh,
                 learning_rate=1.):
        """ hlayers is a list containing the number of neurons in each hidden layer
            activation is the activation function applied to net_j
        """
        self.__scaling_factor = 100.
        # Transfer function: array -> array
        self.activate = vectorize(activation)
        self.learning_rate = learning_rate
        self.layers = []
        self._iteration = 0
        if hlayers:
            for i in range(len(hlayers) + 1):
                # From input layer
                if i == 0:
                    self._add_weights(hlayers[i], inputs)
                # To output layer
                elif i == len(hlayers):
                    self._add_weights(outputs, hlayers[i - 1])
                # Between hidden layers
                else:
                    self._add_weights(hlayers[i], hlayers[i - 1])
        else:
            # No hidden layers: connect inputs directly to outputs
            self._add_weights(outputs, inputs)

    def _rate(self):
        """ Returns learning rate
        """
        return self.learning_rate

    def _scale_in(self, vect):
        """ Scales (packs) input vector
        """
        return vect / self.__scaling_factor

    def _scale_out(self, vect):
        """ Scales output vector back to initial scale
        """
        return vect * self.__scaling_factor

    def _add_weights(self, rows, cols, max_rand=0.1):
        """ Adds a weight matrix filled with small random values
        """
        self.layers.append(
            array([[random() * max_rand for _ in range(cols)]
                   for _ in range(rows)]))

    def _update_weights(self, delta, o_activation):
        """ Updates the weights of every layer
        """
        for i in range(len(self.layers)):
            # Outer product of the layer's delta (column vector) and the
            # activation of the preceding layer: W += eta * d_j * o_i
            d_j = transpose(array([delta[i]]))
            self.layers[i] += self._rate() * d_j * o_activation[i]

    def _propagate(self, vect):
        """ Propagates signal through the network
        """
        vect = self._scale_in(vect)
        activation = [vect]
        for layer in self.layers:
            vect = layer.dot(vect)
            vect = self.activate(vect)
            activation.append(vect)
        # activation.append(self._scale_out(activation.pop()))
        return activation

    def _back_propagate(self, real, expected):
        """ Back error propagation with weights correction
        """
        delta = []
        # Calculate the error of the output layer;
        # (1 - o_j**2) is the derivative of the default tanh activation
        o_j = real[-1]
        expected = self._scale_in(expected)
        delta.append((expected - o_j) * (1. - o_j ** 2))
        # Calculate the errors of the hidden layers
        layer_no = len(self.layers) - 1  # Last layer
        while layer_no > 0:  # Input layer has "no error"
            last_err = delta[-1]  # Get the previous error from the end of the error list
            # print(self.layers[layer_no], '@', last_err, '$')
            s = last_err.dot(self.layers[layer_no])
            o_j = real[layer_no]
            delta.append(s * (1. - o_j ** 2))
            layer_no -= 1
        self._update_weights(delta[::-1], real)

    def compute(self, vector):
        """ Computes the final value for an input vector
            using the current network weights
        """
        vector = transpose(array(vector))
        return self._scale_out(self._propagate(vector)[-1])

    def train(self, sample, expected):
        """ sample and expected -- lists representing input and output vectors
        """
        self._iteration += 1
        sample = transpose(array(sample))
        real = self._propagate(sample)
        expected = transpose(array(expected))
        self._back_propagate(real, expected)
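
Below is a minimal usage sketch, not part of the original gist: it builds a small network with one hidden layer and trains it to approximate the sum of its two inputs. The layer sizes, toy dataset, and iteration count are illustrative assumptions, chosen only to exercise the train and compute methods defined above.

# Usage sketch (illustrative; the toy data, layer sizes, and iteration
# count are assumptions, not values prescribed by the original gist)
if __name__ == '__main__':
    net = Network(inputs=2, outputs=1, hlayers=[4])

    # Teach the network to approximate the sum of its two inputs
    samples = [[10., 20.], [5., 5.], [30., 10.], [2., 40.]]
    targets = [[30.], [10.], [40.], [42.]]

    for _ in range(5000):
        for s, t in zip(samples, targets):
            net.train(s, t)

    # Should move toward 30 as training progresses
    print(net.compute([15., 15.]))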