Simple neural network to predict the XOR gate in Python
""" | |
A simple neural network with 1 hidden layer and 4 neurons, an enhancement to the previous logistic regression to compute the XOR | |
@Author : Vinu Priyesh V.A. | |
""" | |
import numpy as np | |
#Compute functions for OR, AND, XOR; these are used to generate the labels and to validate our results
def compute(x,m,label):
    if(label == "XOR"):
        return np.logical_xor(x[0,:],x[1,:]).reshape(1,m).astype(int)
    if(label == "AND"):
        return np.logical_and(x[0,:],x[1,:]).reshape(1,m).astype(int)
    if(label == "OR"):
        return np.logical_or(x[0,:],x[1,:]).reshape(1,m).astype(int)
#Validation functions for OR, AND, XOR; these check whether the predicted results are correct
def validate(x,y,m,label):
    y1 = compute(x,m,label)
    return np.sum(y1==y)/m*100
#Simple sigmoid, it is better to use ReLU instead
def sigmoid(z):
    s = 1 / (1 + np.exp(-z))
    return s
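#The comment above suggests ReLU; a minimal alternative is sketched here (an assumption, not part of
#the original network) and left commented out, since the rest of the file assumes tanh/sigmoid activations:
#def relu(z):
#    return np.maximum(0, z)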
#Simple tanh
def tanh(x):
    return np.tanh(x)
#Back prop to get the weights and biases computed using gradient descent
def back_prop(m,w1,w2,b1,b2,X,Y,iterations,learning_rate):
    for i in range(iterations):
        Y1,A1,A2 = forward_prop(m,w1,w2,b1,b2,X)
        dz2 = A2 - Y
        if(i%1000==0):  #print the cross-entropy cost every 1000 iterations
            logprobs = np.multiply(np.log(A2), Y) + np.multiply((1 - Y), np.log(1 - A2))
            cost = - np.sum(logprobs) / m
            print("cost : {}".format(cost))
        dw2 = (1 / m) * np.dot(dz2,A1.T)
        db2 = (1 / m) * np.sum(dz2,axis=1,keepdims=True)
        dz1 = np.dot(w2.T,dz2) * (1-np.power(A1,2))  #(1 - A1^2) is the derivative of tanh
        dw1 = (1 / m) * np.dot(dz1,X.T)
        db1 = (1 / m) * np.sum(dz1,axis=1,keepdims=True)
        w1 = w1 - learning_rate * dw1
        b1 = b1 - learning_rate * db1
        w2 = w2 - learning_rate * dw2
        b2 = b2 - learning_rate * db2
    return w1,b1,w2,b2
#Forward prop to get the predictions
def forward_prop(m,w1,w2,b1,b2,X):
    Y = np.zeros((1, m))
    z1 = np.dot(w1,X) + b1
    A1 = tanh(z1)
    z2 = np.dot(w2,A1) + b2
    A2 = sigmoid(z2)
    for i in range(m):
        Y[0, i] = 1 if A2[0, i] > 0.5 else 0
    return Y,A1,A2
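#Shapes, for reference (derived from the code above): X is (2, m), A1 is (neurons, m), while A2 and the
#thresholded prediction Y are (1, m); 0.5 is the decision threshold on the sigmoid output.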
def model(m,iterations,learning_rate,label,neurons):
    print("\nmodel : {}".format(label))
    w1 = np.random.randn(neurons,2) * 0.01
    w2 = np.random.randn(1,neurons) * 0.01
    b1 = np.zeros((neurons,1))
    b2 = np.zeros((1,1))
    #Training phase
    X_train = np.random.randint(2,size=(2,m))
    Y_train = compute(X_train,m,label)
    w1,b1,w2,b2 = back_prop(m,w1,w2,b1,b2,X_train,Y_train,iterations,learning_rate)
    Y1,A1,A2 = forward_prop(m,w1,w2,b1,b2,X_train)
    P_train = validate(X_train,Y1,m,label)
    #Testing phase, on a fresh random set twice the size of the training set
    m*=2
    X_test = np.random.randint(2,size=(2,m))
    Y1,A1,A2 = forward_prop(m,w1,w2,b1,b2,X_test)
    P_test = validate(X_test,Y1,m,label)
    print("Training accuracy : {}%\nTesting accuracy : {}%".format(P_train,P_test))
    return P_train,P_test
m=1000
iterations = 1000
learning_rate = 1.2
#model(m,iterations,learning_rate,"OR",4)
#model(m,iterations,learning_rate,"AND",4)
ptrain, ptest = model(m,iterations,learning_rate,"XOR",4)
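#A hedged usage sketch (assumptions beyond the original script): fixing NumPy's global seed makes the
#randomly generated training/testing sets and the weight initialisation reproducible, and the same
#model function can also be trained on the OR and AND gates:
#    np.random.seed(1)
#    or_train, or_test = model(m,iterations,learning_rate,"OR",4)
#    and_train, and_test = model(m,iterations,learning_rate,"AND",4)
#The seed value and the or_/and_ variable names are illustrative only.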