# Full code at: https://colab.research.google.com/gist/angadsinghsandhu/2b329be4e90e51eef457997d1a72ec81/esel3d_beta-demo-colab.ipynb
'''
Func : AndhaBT
A function that calculates the number of
Instagram stories that one person studying
in the 7th semester at MIT might see over the
course of this year. This function takes into
account the number of people in your college as
well as the fraction of people who would not post.
'''
@angadsinghsandhu
angadsinghsandhu / regression_eg.py
Created December 16, 2020 08:06
regression model example code
from Framework.RegressionFramework import NeuralNetwork
from Framework.predict import predict_regression as predict
from Framework.normalize import normalize
from Data_Creation import regression_data

def run():
    # getting data
    x_train, y_train = regression_data.data(values=1000)

from Framework.ClassificationFramework import NeuralNetwork
from Framework.predict import predict_classification as predict
from Framework.normalize import normalize
from Data_Creation import odd_even_data

def run():
    # getting data
    x_train, y_train = odd_even_data.data()
    # normalizing data
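Both previews cut off before the training step, but the imports suggest the intended flow: load data, normalize it, train a NeuralNetwork, then predict. A minimal sketch of that flow under those assumptions follows; the constructor arguments and the predict helper's signature are guesses, not shown in the gists.

# hypothetical end-to-end flow; the NeuralNetwork(...) arguments and
# the predict(...) signature are assumptions, not from the gists
def run():
    # getting data
    x_train, y_train = regression_data.data(values=1000)
    # normalizing data
    x_train = normalize(x_train)
    # building and training the network (assumed constructor)
    nn = NeuralNetwork(x_train, y_train)
    nn.train()
    # predicting on a single sample (assumed helper signature)
    print(predict(nn, x_train[0]))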
@angadsinghsandhu
angadsinghsandhu / predict.py
Last active December 2, 2020 08:37
predicting input values
# Predicting input values
def predict(self, num):
    # setting input as the first layer
    self.z0 = num
    # forward propagating and returning the result
    return self.forwardprop()[0]
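For context, a call site might look like the following sketch; the trained network instance is assumed, and the column-vector input shape mirrors the reshape([-1, 1]) used in train.py below.

# hypothetical call (nn is an assumed trained NeuralNetwork instance)
import numpy as np

sample = np.array([0.42]).reshape([-1, 1])  # column vector, as in train.py
y_hat = nn.predict(sample)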
@angadsinghsandhu
angadsinghsandhu / resetloss.py
Created December 1, 2020 05:23
Resetting Loss
# reset loss after each iteration
def resetloss(self):
    self.loss = 0
@angadsinghsandhu
angadsinghsandhu / nn_loss.py
Created December 1, 2020 05:22
Calculating Loss
# Calculating Loss
def NN_loss(self, y_hat, y):
    loss = (y_hat - y)**2
    loss = loss[0]
    self.loss += loss
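As a sanity check, the accumulated loss after a pass over the data should equal the sum of per-sample squared errors. A small standalone check of that identity with plain NumPy (scalars instead of column vectors, so the [0] indexing from NN_loss is dropped):

# standalone check: accumulated loss == sum of squared errors
import numpy as np

preds  = np.array([0.2, 0.7, 0.9])
labels = np.array([0.0, 1.0, 1.0])

total = 0.0
for y_hat, y in zip(preds, labels):
    total += (y_hat - y) ** 2  # same update as NN_loss, minus the [0]

assert np.isclose(total, np.sum((preds - labels) ** 2))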
@angadsinghsandhu
angadsinghsandhu / backprop.py
Last active December 1, 2020 16:03
Backward Propagation
# Backward Propagation Logic
def backprop(self, y_hat, y):
    # using the chain rule to find the derivative of the
    # loss function with respect to the last layer, i.e. z
    j = self.num_layers
    # calculating last dz
    cmd = "self.dz{} = 2 * (y_hat - y) * self.d_sigmoid(self.z{})".format(j, j)
    exec(cmd)
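The exec call builds the attribute name dz<j> at runtime. The same dynamic lookup can be written without exec using getattr/setattr, which is easier to debug; a sketch of the equivalent line, assuming the z1..zN / dz1..dzN naming scheme used across these gists:

# equivalent to the exec() line above, assuming attributes are
# named z1..zN and dz1..dzN as in the other gists
z_j = getattr(self, "z{}".format(j))
setattr(self, "dz{}".format(j), 2 * (y_hat - y) * self.d_sigmoid(z_j))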
@angadsinghsandhu
angadsinghsandhu / feedforward.py
Created December 1, 2020 05:17
Forward Propagation
# Forward Propagation Logic
def forwardprop(self):
    # dynamically calculating the first layer
    exec("self.z1 = np.dot( self.w1, self.z0 ) + self.b1")
    # dynamically calculating the activation "a" of the first layer
    exec("self.a1 = self.sigmoid(self.z1)")
    # dynamically calculating all other layers
    for i in range(2, self.num_layers + 1):
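        # (reconstructed continuation; the gist preview cuts off here, and
        # this assumes the loop repeats the layer-1 pattern for each layer)
        exec("self.z{} = np.dot( self.w{}, self.a{} ) + self.b{}".format(i, i, i - 1, i))
        exec("self.a{} = self.sigmoid(self.z{})".format(i, i))
    # returning the last layer's activation (assumed from predict.py,
    # which indexes the result of forwardprop())
    return eval("self.a{}".format(self.num_layers))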
@angadsinghsandhu
angadsinghsandhu / d_sigmoid.py
Created December 1, 2020 05:14
Derivative of the sigmoid squashing function
# activation function derivative
def d_sigmoid(self, z):
    eta = self.sigmoid(z)
    return eta * (1 - eta)
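This uses the standard identity sigma'(z) = sigma(z) * (1 - sigma(z)), which reuses the already-computed sigmoid instead of re-deriving the exponential. A quick standalone check against a finite-difference approximation (free functions here, since the class is not needed):

# verify the sigmoid-derivative identity numerically
import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

z = 0.3
eta = sigmoid(z)
analytic = eta * (1 - eta)
h = 1e-6
numeric = (sigmoid(z + h) - sigmoid(z - h)) / (2 * h)
assert np.isclose(analytic, numeric)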
@angadsinghsandhu
angadsinghsandhu / train.py
Created December 1, 2020 05:11
function that executes forward prop, backprop, and loss calculation
# training neural net
def train(self):
    # dynamically calculating layers and their respective z
    for i in range(len(self.input)):
        self.z0 = self.input[i].reshape([-1, 1])
        # forward step
        output = self.forwardprop()
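        # sketch of the remaining steps named in the gist description
        # (self.target is an assumed attribute name; the NN_loss and
        # backprop signatures come from the gists above)
        self.NN_loss(output, self.target[i])
        self.backprop(output, self.target[i])
    # reset the accumulated loss before the next pass (see resetloss.py)
    self.resetloss()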