customized neural network class
# original code is taken from
# https://github.com/SergioFierens
class NeuralNetwork
  # Creates an empty neural network.
  # @options
  #   => structure: layer sizes, e.g. [inputs, hidden, ..., outputs]
  #   => learning_rate
  #   => momentum
  #   => propagation_function
  #   => derivative_propagation_function
  #   => initial_weight_function
  def initialize(options)
    @default_options = {
      structure: [1, 2, 1],
      initial_weight_function: lambda { |n, i, j| ((rand 2000) / 1000.0) - 1 },
      propagation_function: lambda { |x| 1 / (1 + Math.exp(-1 * x)) }, # alternative: lambda { |x| Math.tanh(x) }
      derivative_propagation_function: lambda { |y| y * (1 - y) },     # alternative: lambda { |y| 1.0 - y**2 }
      learning_rate: 0.25,
      momentum: 0 # e.g. 0.1 to enable momentum
    }
    @options = @default_options.merge options
  end
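
  # Illustrative (not from the original gist): any default above can be
  # overridden at construction, e.g. to switch to tanh activations:
  #   net = NeuralNetwork.new(structure: [2, 4, 1],
  #                           learning_rate: 0.1,
  #                           propagation_function: lambda { |x| Math.tanh(x) },
  #                           derivative_propagation_function: lambda { |y| 1.0 - y**2 })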
  def structure
    @options[:structure]
  end
  # Evaluates the input.
  # Example:
  #   net = NeuralNetwork.new(structure: [4, 3, 2])
  #   net.eval([25, 32.3, 12.8, 1.5])
  #   # => [0.83, 0.03]
  def eval(input_values)
    # TODO: validate the input size against structure.first
    reset_network if !@weights
    feedforward(input_values)
    return @nodes.last.clone
  end
  # Trains the network using the backpropagation algorithm.
  #
  # inputs: the network's input values
  #
  # outputs: the expected output for the given input
  #
  # Returns the network error:
  #   => 0.5 * sum( (expected_value[i] - output_value[i])**2 )
  def train(inputs, outputs)
    eval(inputs)
    backpropagate(outputs)
    calculate_error(outputs)
  end
  protected
  # Resets the neural network. This includes resetting:
  #   => the node structure
  #   => the weights
  #   => the momentum bookkeeping (@last_changes)
  def reset_network
    init_nodes
    init_weights
    init_momentum_need
  end
  # Creates the neurons: one array per layer, every node initialized to 1.0.
  def init_nodes
    @nodes = Array.new(structure.length) do |i|
      Array.new(structure[i], 1.0)
    end
    # TODO: CHECK BIAS PROBLEMS
  end
  def init_weights
    @weights = Array.new(structure.length - 1) do |i|
      link_sources = structure[i]
      link_targets = structure[i + 1]
      Array.new(link_sources) do |j|
        Array.new(link_targets) do |k|
          @options[:initial_weight_function].call(i, j, k)
        end
      end
    end
  end
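
  # @last_changes below mirrors the shape of @weights: one 0.0 per link,
  # holding that link's previous update so update_weights can apply the
  # momentum term.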
  def init_momentum_need
    @last_changes = Array.new(@weights.length) do |w|
      Array.new(@weights[w].length) do |i|
        Array.new(@weights[w][i].length, 0.0)
      end
    end
  end
  def backpropagate(output)
    # TODO: check input
    calc_output_diffs(output)
    calc_internal_diffs
    update_weights
  end
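
  # Output-layer delta rule: for each output node with value o and target t,
  #   delta = f'(o) * (t - o)
  # where f' is the derivative_propagation_function (for the sigmoid it is
  # y * (1 - y), expressed in terms of the node's output y).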
  def calc_output_diffs(expected_output)
    output_layer = @nodes.last
    output_diffs = []
    output_layer.each_index do |output_index|
      error = expected_output[output_index] - output_layer[output_index]
      diff = @options[:derivative_propagation_function].call(
        output_layer[output_index]) * error
      output_diffs << diff
    end
    @diffs = [output_diffs]
  end
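
  # Hidden-layer delta rule: a node's delta is its activation derivative
  # times the weighted sum of the deltas one layer up:
  #   delta_j = f'(o_j) * sum_k( delta_k * w_jk )
  # Layers are processed from the last hidden layer back to the first.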
  def calc_internal_diffs
    prev_diffs = @diffs.last
    (@nodes.length - 2).downto(1) do |layer_index|
      layer_diffs = []
      @nodes[layer_index].each_index do |node_index|
        error = 0.0
        structure[layer_index + 1].times do |next_layer_node_index|
          error += prev_diffs[next_layer_node_index] * @weights[layer_index][node_index][next_layer_node_index]
        end
        layer_diffs[node_index] = @options[:derivative_propagation_function].call(@nodes[layer_index][node_index]) * error
      end
      prev_diffs = layer_diffs
      @diffs.unshift layer_diffs
    end
  end
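
  # Weight update with momentum:
  #   w_jk += learning_rate * delta_k * o_j + momentum * last_change_jk
  # @diffs[layer] holds the deltas of layer+1, so @diffs[layer][link] pairs
  # with the source activation @nodes[layer][node].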
  def update_weights
    (@weights.length - 1).downto(0) do |layer|
      @weights[layer].each_index do |node|
        @weights[layer][node].each_index do |link|
          change = @diffs[layer][link] * @nodes[layer][node]
          @weights[layer][node][link] += @options[:learning_rate] * change +
                                         @options[:momentum] * @last_changes[layer][node][link]
          @last_changes[layer][node][link] = change # stored for momentum
        end
      end
    end
  end
  # Forward pass: copies the input into the first layer, then propagates
  # each layer's weighted sums through the propagation function.
  def feedforward(input_values)
    input_values.each_index do |input_index|
      @nodes.first[input_index] = input_values[input_index]
    end
    @weights.each_index do |layer|
      structure[layer + 1].times do |link|
        sum = 0.0
        @nodes[layer].each_index do |node|
          sum += @nodes[layer][node] * @weights[layer][node][link]
        end
        @nodes[layer + 1][link] = @options[:propagation_function].call(sum)
      end
    end
  end
  # Calculates the quadratic error for an expected output:
  #   Error = 0.5 * sum( (expected_value[i] - output_value[i])**2 )
  def calculate_error(expected_output)
    output_values = @nodes.last
    error = 0.0
    expected_output.each_index do |output_index|
      error +=
        0.5 * (output_values[output_index] - expected_output[output_index])**2
    end
    return error
  end
end
ann = NeuralNetwork.new(structure: [1, 2, 3])
ann.train([1], [0.4, 0.4, 0.4]) # with the sigmoid output, targets must lie in (0, 1)
puts ann.eval([1])
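
# A fuller usage sketch (illustrative, not part of the original gist):
# a repeated-training loop on an XOR-style dataset. Targets are kept inside
# (0, 1) because the sigmoid output can never leave that range; convergence
# is not guaranteed here, since the network has no bias nodes (see the TODO
# in init_nodes), and the epoch count is arbitrary.
net = NeuralNetwork.new(structure: [2, 3, 1], learning_rate: 0.3)
samples = {
  [0.0, 0.0] => [0.1], [0.0, 1.0] => [0.9],
  [1.0, 0.0] => [0.9], [1.0, 1.0] => [0.1]
}
2000.times do |epoch|
  error = samples.sum { |input, target| net.train(input, target) }
  puts "epoch #{epoch}: error #{error.round(4)}" if (epoch % 500).zero?
end
samples.each_key { |input| puts "#{input.inspect} => #{net.eval(input).inspect}" }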