Created December 9, 2022 22:34
Generic multi-layered perceptron, via ChatGPT
import numpy as np

class MultiLayeredPerceptron:
    def __init__(self, *layers):
        self.layers = layers

    def forward(self, input):
        # Perform a forward pass through the layers of the network
        for layer in self.layers:
            input = layer.forward(input)
        return input

    def train(self, input, expected_output, learning_rate):
        # Perform a forward pass through the network to get the actual output
        actual_output = self.forward(input)
        # The error is the expected output minus the actual output
        error = expected_output - actual_output
        # Backpropagate the error through the layers, last to first
        for layer in self.layers[::-1]:
            error = layer.backpropagate(error, learning_rate)

class Layer:
    def __init__(self, weights, biases):
        self.weights = weights  # shape: (n_outputs, n_inputs)
        self.biases = biases    # shape: (n_outputs,)

    def forward(self, input):
        # Cache the input and output; both are needed during backpropagation
        self.input = input
        # Weighted sum followed by the sigmoid activation
        self.output = 1 / (1 + np.exp(-(self.weights @ input + self.biases)))
        return self.output

    def backpropagate(self, error, learning_rate):
        # Scale the error by the sigmoid derivative: s'(z) = s(z) * (1 - s(z))
        delta = error * self.output * (1 - self.output)
        # Compute the error for the previous layer before the weights change
        previous_layer_error = self.weights.T @ delta
        # Gradient-descent update for the squared-error loss
        self.weights += learning_rate * np.outer(delta, self.input)
        self.biases += learning_rate * delta
        return previous_layer_error
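The gist itself ships no usage example; below is a minimal sketch of how the two classes fit together, training a tiny 2-2-1 network on XOR. The layer sizes, random seed, learning rate, and epoch count are illustrative assumptions, not part of the original.

# Hypothetical usage sketch: a 2-2-1 sigmoid network learning XOR.
rng = np.random.default_rng(0)
net = MultiLayeredPerceptron(
    Layer(rng.normal(size=(2, 2)), np.zeros(2)),  # hidden layer: 2 inputs -> 2 units
    Layer(rng.normal(size=(1, 2)), np.zeros(1)),  # output layer: 2 units -> 1 output
)
inputs = [np.array([0, 0]), np.array([0, 1]), np.array([1, 0]), np.array([1, 1])]
targets = [np.array([0.0]), np.array([1.0]), np.array([1.0]), np.array([0.0])]
for _ in range(5000):
    for x, y in zip(inputs, targets):
        net.train(x, y, learning_rate=0.5)
# Outputs should approach 0, 1, 1, 0 (convergence depends on the initialization)
print([float(net.forward(x)) for x in inputs])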