@manveru
Created October 5, 2010 06:30
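# A minimal feed-forward neural network in plain Ruby: one hidden layer,
# sigmoid activations, trained by backpropagation to classify ASCII
# letters as lowercase or uppercase from their bit patterns.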
class Neuron
  attr_accessor :learning_rate, :weights, :last_activation

  def initialize(learning_rate, num_inputs)
    self.learning_rate = learning_rate
    self.weights = Array.new(num_inputs){ rand } # random weights in [0, 1)
    self.last_activation = 0
  end

  # Weighted sum of the inputs squashed through the logistic sigmoid; the
  # result is cached for the backward pass.
  def forward_propagation(data)
    sum = weights.zip(data).map{|x, y| x * y }.inject(:+)
    self.last_activation = 1 / (1 + Math.exp(-sum))
  end

  # Delta rule: move each weight in proportion to its input and the error
  # term handed down by the network.
  def backward_propagation(error_term, data)
    self.weights = weights.zip(data).map{|x, y|
      x + learning_rate * error_term * y
    }
  end
end
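# Note: the logistic sigmoid s(x) = 1 / (1 + e^-x) has the derivative
# s(x) * (1 - s(x)), which is why every error term below multiplies an
# activation a by (1 - a).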
class NeuralNetwork
  attr_accessor :learning_rate, :num_hidden_neurons, :hidden_neurons,
                :output_neurons, :hidden_activations, :output_activations,
                :output_errors

  def initialize(num_inputs, num_hidden_neurons, num_outputs)
    self.learning_rate = 0.1
    self.num_hidden_neurons = num_hidden_neurons
    self.hidden_neurons = Array.new(num_hidden_neurons){
      Neuron.new(learning_rate, num_inputs)
    }
    self.output_neurons = Array.new(num_outputs){
      Neuron.new(learning_rate, hidden_neurons.length)
    }
  end
  # Index of the largest activation, i.e. the winning output neuron.
  def find_max(activation_values)
    activation_values.each_with_index.max.last
  end

  def forward_propagation(data)
    self.hidden_activations = hidden_neurons.map{|neuron|
      neuron.forward_propagation(data)
    }
    self.output_activations = output_neurons.map{|neuron|
      neuron.forward_propagation(hidden_activations)
    }
    find_max(output_activations)
  end
  def backward_propagation(data, desired_action)
    forward_propagation(data)
    self.output_errors = []

    # Output layer: the error term is (target - output) scaled by the
    # sigmoid derivative, output * (1 - output).
    output_neurons.each_with_index do |output_neuron, i|
      fire = i == desired_action ? 1 : 0
      last_activation = output_neuron.last_activation
      error_term = (fire - last_activation) *
                   last_activation * (1 - last_activation)
      output_errors << error_term
      output_neuron.backward_propagation(error_term, hidden_activations)
    end

    # Hidden layer: each neuron's error is the weighted sum of the output
    # error terms, scaled by the sigmoid derivative of its own activation.
    hidden_neurons.each_with_index do |hidden_neuron, i|
      weight_delta_h = 0
      output_neurons.each_with_index do |output_neuron, j|
        weight_delta_h += output_errors[j] * output_neuron.weights[i]
      end
      last_activation = hidden_neuron.last_activation
      error_term = weight_delta_h * last_activation * (1 - last_activation)
      hidden_neuron.backward_propagation(error_term, data)
    end
  end
end
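# Note that training is online: each call to backward_propagation runs a
# forward pass for one pattern and then updates the weights immediately.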
# Map the winning output index to a human-readable label.
def analyze(result)
  0 == result ? "lowercase" : "uppercase"
end
nn = NeuralNetwork.new(8, 8, 2)
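# Each letter is encoded as its 7-bit ASCII pattern with a leading 0 to
# pad it to the eight inputs above. Upper- and lowercase versions of a
# letter differ only in the 0b0100000 bit, which is the signal the
# network has to learn.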
lowercase = ('a'..'z').map{|letter|
  [0] + letter.ord.to_s(2).each_char.map{|c| c.to_i }
}
uppercase = ('A'..'Z').map{|letter|
  [0] + letter.ord.to_s(2).each_char.map{|c| c.to_i }
}
puts "Untrained:"
puts "a is #{analyze(nn.forward_propagation(lowercase[0]))}"
puts "A is #{analyze(nn.forward_propagation(uppercase[0]))}"
100.times do
  lowercase.each{|letter| nn.backward_propagation(letter, 0) }
  uppercase.each{|letter| nn.backward_propagation(letter, 1) }
end
puts
puts "Trained:"
puts "a is #{analyze(nn.forward_propagation(lowercase[0]))}"
puts "A is #{analyze(nn.forward_propagation(uppercase[0]))}"