MLAssignment3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 26 03:23:32 2018
@author: Shabnam Rani
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, linear_model
# Dataset generation
np.random.seed(0)
X, y = datasets.make_moons(200, noise=0.20)
plt.scatter(X[:, 0], X[:, 1], s=40, c=y, cmap=plt.cm.Spectral)
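# plot_decision_boundary is called below but never defined in this gist. The
# helper sketched here is an assumed implementation (the usual mesh-grid
# approach): it evaluates pred_func over a fine grid covering the data and
# overlays the training points on the resulting contour plot.
def plot_decision_boundary(pred_func):
    # Pad the plotting region slightly beyond the data range
    x_min, x_max = X[:, 0].min() - 0.5, X[:, 0].max() + 0.5
    y_min, y_max = X[:, 1].min() - 0.5, X[:, 1].max() + 0.5
    h = 0.01
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    # Predict a class for every point in the mesh and reshape back to the grid
    Z = pred_func(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Spectral)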
# Baseline classifier: logistic regression
clf = linear_model.LogisticRegressionCV()
clf.fit(X, y)
plot_decision_boundary(lambda x: clf.predict(x))

Exp = len(X)               # number of training examples
Dimensions = 2             # dimensionality of the input layer
OutputDimensions = 2       # dimensionality of the output layer
epsilon = 0.01             # learning rate (alpha)
regularizinglambda = 0.01  # regularization strength
def calculate_loss(model):
    W1, b1, W2, b2 = model['W1'], model['b1'], model['W2'], model['b2']
    # Forward propagation
    z1 = X.dot(W1) + b1
    a1 = np.tanh(z1)
    z2 = a1.dot(W2) + b2
    exp_scores = np.exp(z2)
    probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)
    # Cross-entropy loss over the training set
    correct_logprobs = -np.log(probs[range(Exp), y])
    data_loss = np.sum(correct_logprobs)
    # Add the L2 regularization term, weighted by lambda
    data_loss += regularizinglambda / 2 * (np.sum(np.square(W1)) + np.sum(np.square(W2)))
    return 1. / Exp * data_loss
def predict(model, x):
    W1, b1, W2, b2 = model['W1'], model['b1'], model['W2'], model['b2']
    # Forward propagation
    z1 = x.dot(W1) + b1
    a1 = np.tanh(z1)
    z2 = a1.dot(W2) + b2
    exp_scores = np.exp(z2)
    probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)
    return np.argmax(probs, axis=1)
def Buildingofthemodel(nodesinhiddenLayer, passes=200, loss=False):
    # Initialize the parameters to random values. We need to learn these.
    np.random.seed(0)
    W1 = np.random.randn(Dimensions, nodesinhiddenLayer) / np.sqrt(Dimensions)
    b1 = np.zeros((1, nodesinhiddenLayer))
    W2 = np.random.randn(nodesinhiddenLayer, OutputDimensions) / np.sqrt(nodesinhiddenLayer)
    b2 = np.zeros((1, OutputDimensions))
    model = {}
    # Full-batch gradient descent
    for i in range(0, passes):
        # Forward propagation
        z1 = X.dot(W1) + b1
        a1 = np.tanh(z1)
        z2 = a1.dot(W2) + b2
        exp_scores = np.exp(z2)
        probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)
        # Backward propagation
        delta3 = probs
        delta3[range(Exp), y] -= 1
        dW2 = (a1.T).dot(delta3)
        db2 = np.sum(delta3, axis=0, keepdims=True)
        delta2 = delta3.dot(W2.T) * (1 - np.power(a1, 2))
        dW1 = np.dot(X.T, delta2)
        db1 = np.sum(delta2, axis=0)
        # Add the regularization terms to the weight gradients
        dW2 += regularizinglambda * W2
        dW1 += regularizinglambda * W1
        # Gradient descent parameter update
        W1 += -epsilon * dW1
        b1 += -epsilon * db1
        W2 += -epsilon * dW2
        b2 += -epsilon * db2
        # Store the updated parameters in the model
        model = {'W1': W1, 'b1': b1, 'W2': W2, 'b2': b2}
        # With the default of 200 passes, reporting every 1000 iterations would
        # only print once, so report the loss every 100 iterations instead.
        if loss and i % 100 == 0:
            print("Loss after iteration %i: %f" % (i, calculate_loss(model)))
    return model
# Compare decision boundaries for different hidden layer sizes
plt.figure(figsize=(16, 32))
HiddenlayerDms = [1, 2, 3, 4, 5, 20, 50]
for i, nodesinhiddenLayer in enumerate(HiddenlayerDms):
    plt.subplot(5, 2, i + 1)
    model = Buildingofthemodel(nodesinhiddenLayer)
    plot_decision_boundary(lambda x: predict(model, x))
plt.show()

# Train a network with a 2-node hidden layer and plot its decision boundary
model = Buildingofthemodel(2, loss=True)
plot_decision_boundary(lambda x: predict(model, x))
plt.title("Plotting Decision Boundary")
plt.show()
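# A quick sanity check (not part of the original assignment): classify a couple
# of hand-picked points with the trained 2-node model. The coordinates below
# are arbitrary illustrative values, not taken from the dataset.
sample_points = np.array([[0.0, 0.5], [2.0, -0.5]])
print("Predicted classes:", predict(model, sample_points))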