Small neural network trainer.
import numpy as np


class NeuralNetwork:
    def __init__(self, data, layer_sizes):
        """
        :param data: (M)x(N+1) matrix of input data, Y is in the last column.
        :param layer_sizes: Node count for each layer, e.g. [3, 3, 1].
        """
        self.data = data
        self.rand_range = 3
        self.layer_sizes = layer_sizes
        self.m = len(data)
        self.n = len(data[0]) - 1
        self.y = data[:, self.n].reshape((self.m, 1))
        self.x = data[:, :self.n]
        self.weights = []
        self.init_weights()

    def init_weights(self):
        # Each weight matrix connects the previous layer (starting from the
        # N input features) to the next one.
        prev_size = self.n
        for size in self.layer_sizes:
            self.weights.append(self.get_zero_avg_weights(prev_size, size))
            prev_size = size

    def get_zero_avg_weights(self, h, w):
        # Uniform random weights in [-rand_range / 2, rand_range / 2].
        return np.random.rand(h, w) * self.rand_range - (self.rand_range * 0.5)

    def sigmoid(self, x):
        return 1 / (1 + np.exp(-x))

    def train(self, iteration):
        layer_count = len(self.layer_sizes)
        activation = [None] * (layer_count + 1)
        delta = [None] * layer_count
        activation[0] = self.x

        for _ in range(iteration):
            # Forward propagation.
            for i in range(len(self.weights)):
                activation[i + 1] = self.sigmoid(activation[i].dot(self.weights[i]))

            # Backward propagation: the error term is scaled by the sigmoid
            # derivative a * (1 - a) at each layer's activation.
            delta[layer_count - 1] = (self.y - activation[layer_count]) * (activation[layer_count] * (1 - activation[layer_count]))
            for i in range(layer_count - 2, -1, -1):
                delta[i] = delta[i + 1].dot(self.weights[i + 1].T) * (activation[i + 1] * (1 - activation[i + 1]))

            # Weight update (gradient step with an implicit learning rate of 1).
            for i in range(layer_count - 1, -1, -1):
                self.weights[i] += activation[i].T.dot(delta[i])

        # Print expected output, prediction, and absolute error side by side.
        print(np.concatenate((self.y, activation[layer_count], np.abs(self.y - activation[layer_count])), axis=1))


if __name__ == "__main__":
    np.random.seed(1)
    npdata = np.genfromtxt('logreg.txt', delimiter=',')
    trainer = NeuralNetwork(npdata, [3, 3, 3, 1])
    trainer.train(1000)
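
The `logreg.txt` training file is not included in the gist, so the `__main__` block will fail without it. As a self-contained smoke test, the class can be fed an in-memory dataset instead; the XOR table and the [3, 3, 1] layer layout below are illustrative assumptions, not something the original specifies.

import numpy as np

# Hypothetical smoke test: XOR truth table, label in the last column,
# matching the (M)x(N+1) layout the constructor expects.
xor_data = np.array([
    [0.0, 0.0, 0.0],
    [0.0, 1.0, 1.0],
    [1.0, 0.0, 1.0],
    [1.0, 1.0, 0.0],
])

np.random.seed(1)
trainer = NeuralNetwork(xor_data, [3, 3, 1])
trainer.train(10000)

Because the update step uses an implicit learning rate of 1, the printed absolute-error column typically shrinks toward zero within a few thousand iterations on a dataset this small, though the exact values depend on the random initialization.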