Last active
April 26, 2019 01:44
-
-
Save oiacrasec/7681c063db707a9013a4761998d1695c to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
from unittest import TestCase | |
import numpy as np | |
import matplotlib.pyplot as plt | |
class Perceptron(object):
    """Single-layer perceptron classifier with a bipolar (+1 / -1) step output.

    Parameters
    ----------
    numero_entradas : int
        Number of input features.
    loop : int
        Number of training epochs to run.
    taxa_aprendizado : float
        Learning rate applied to each weight update.
    bias : float
        Initial bias value (only used when explicit ``pesos`` are given;
        with ``pesos=None`` all weights, bias included, start at zero).
    pesos : list[float] | None
        Optional initial weights, one per input feature.
    """

    def __init__(self, numero_entradas, loop=100, taxa_aprendizado=0.01, bias=1.0, pesos=None):
        self.threshold = loop
        self.learning_rate = taxa_aprendizado
        # weights[0] holds the bias; weights[1:] hold the per-feature weights.
        # Explicit `is None` check instead of truthiness, so edge-case weight
        # lists are never silently discarded.
        if pesos is None:
            self.weights = np.zeros(numero_entradas + 1)
        else:
            self.weights = np.array([bias] + pesos)
        self.errors = []  # per-epoch misclassification counts, filled by train()

    def activation_function(self, inputs):
        """Return the net input: dot(inputs, weights) + bias."""
        return np.dot(inputs, self.weights[1:]) + self.weights[0]

    def predict(self, inputs):
        """Classify `inputs` as 1 when the net input is non-negative, else -1."""
        activation = self.activation_function(inputs)
        return np.where(activation >= 0, 1, -1)

    def train(self, training_inputs, labels):
        """Fit the weights with the perceptron learning rule.

        Iterates `threshold` epochs over (sample, label) pairs, nudging the
        weights by learning_rate * (label - prediction) on each mistake.
        Returns self so calls can be chained (fluent style).
        """
        for _ in range(self.threshold):
            errors = 0
            for inputs, label in zip(training_inputs, labels):
                prediction = self.predict(inputs)
                error = label - prediction  # 0 when correct, +/-2 when wrong
                update = self.learning_rate * error
                self.weights[1:] += update * inputs
                self.weights[0] += update
                errors += int(update != 0.0)
            self.errors.append(errors)
        return self

    def retornar_pesos_bias(self):
        """Return the feature weights followed by the bias.

        Generalized: the original hard-coded indices 1 and 2, which was only
        valid for exactly two inputs. For two inputs the result is the same
        (w1, w2, bias) tuple as before; for n inputs it is (w1, ..., wn, bias).
        """
        return (*self.weights[1:], self.weights[0])
class Adaline(Perceptron):
    """ADAptive LInear NEuron: full-batch gradient descent on the linear output.

    Inherits construction, activation and prediction from Perceptron;
    only the training rule differs (continuous error instead of the step
    output, whole batch per update).
    """

    def train(self, training_inputs, labels):
        """Fit weights by batch gradient descent; returns self (fluent).

        Each epoch appends half the summed squared residuals (the standard
        ADALINE cost) to self.errors.
        """
        for _ in range(self.threshold):
            # Linear net input for the entire batch at once.
            net_input = self.activation_function(training_inputs)
            residuals = labels - net_input
            # Cost is computed from the pre-update residuals.
            cost = (residuals ** 2).sum() / 2.0
            # Gradient step: feature weights first, then the bias term.
            self.weights[1:] += self.learning_rate * training_inputs.T.dot(residuals)
            self.weights[0] += self.learning_rate * residuals.sum()
            self.errors.append(cost)
        return self
class Teste(TestCase):
    """Checks that trained classifiers reproduce the AND truth table."""

    def test(self, classes):
        """Assert every classifier in `classes` predicts the bipolar AND.

        Fix: uses assertEqual — the assertEquals alias was deprecated since
        Python 3.2 and removed in Python 3.12.
        """
        for classe in classes:
            self.assertEqual(classe.predict(np.array([1, 1])), 1)
            self.assertEqual(classe.predict(np.array([1, 0])), -1)
            self.assertEqual(classe.predict(np.array([0, 1])), -1)
            self.assertEqual(classe.predict(np.array([0, 0])), -1)
class Plotagem(object):
    """Plots the AND truth-table points together with a trained model's decision boundary."""

    def __init__(self, w1, w2, bias, title):
        # w1, w2: trained feature weights (for x1 and x2); bias: trained bias term.
        self.w1 = w1
        self.w2 = w2
        self.bias = bias
        self.title = title

    def plotar(self):
        """Draw the decision line w1*x1 + w2*x2 + bias = 0 over the AND points."""
        xvals = np.arange(-1, 3, 0.01)
        # Solve w1*x1 + w2*x2 + bias = 0 for x2, with x1 on the horizontal axis.
        # Fix: the original computed (-(x * w2) - bias) / w1, swapping the two
        # weights relative to the X1/X2 axis labels below.
        newyvals = (-(xvals * self.w1) - self.bias) / self.w2
        plt.plot(xvals, newyvals, 'r-')
        plt.title(self.title)
        plt.xlabel('X1')
        plt.ylabel('X2')
        plt.axis([-1, 2, -1, 2])
        # Three blue triangles for the -1-labelled points, one green dot for +1.
        plt.plot([0, 1, 0], [0, 0, 1], 'b^')
        plt.plot([1], [1], 'go')
        plt.xticks([0, 1])
        plt.yticks([0, 1])
        plt.show()
def main():
    """Train a Perceptron and an Adaline on the AND gate, verify both, and plot their decision boundaries.

    Reference AND truth table (for OR, adjust `labels` and the expected
    values in the Teste class accordingly, otherwise nothing is plotted):

        x1 x2  y
         1  1  1
         1  0 -1
         0  1 -1
         0  0 -1
    """
    training_inputs = np.array([
        [1, 1],  # x1, x2 (0)
        [1, 0],  # x1, x2 (1)
        [0, 1],  # x1, x2 (2)
        [0, 0],  # x1, x2 (3)
    ])
    labels = np.array([1, -1, -1, -1])  # y(0), y(1), y(2), y(3)

    # Idiomatic len() instead of calling the dunder __len__() directly.
    n_entradas = len(training_inputs[0])

    perceptron = Perceptron(
        numero_entradas=n_entradas,
        pesos=[0.7, 0.3],
        bias=0.8,
    ).train(training_inputs, labels)
    p_p1, p_p2, p_bias = perceptron.retornar_pesos_bias()

    adaline = Adaline(
        numero_entradas=n_entradas,
        pesos=[0.7, 0.3],
        bias=0.8,
    ).train(training_inputs, labels)
    a_p1, a_p2, a_bias = adaline.retornar_pesos_bias()

    # Pass an explicit methodName: TestCase() without one looks for a
    # `runTest` method and raises ValueError on Python versions before 3.11.
    Teste('test').test([perceptron, adaline])

    Plotagem(p_p1, p_p2, p_bias, 'Perceptron').plotar()
    Plotagem(a_p1, a_p2, a_bias, 'Adaline').plotar()
# Script entry point: run the demo only when executed directly, not on import.
if __name__ == '__main__':
    main()
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment