Kirthi Shankar Sivamani (ksivaman)

ksivaman / one_layer_forward_pass.py
Created July 13, 2019 20:31
Forward pass for one layer in feed forward neural networks
import numpy as np
import activations  # local helper module with relu/sigmoid (see activations_and_derivatives.py below)

def one_layer_forward_pass(input_activations, weights, bias, activation='R'):
    # affine transform: W.a + b
    output = np.dot(weights, input_activations) + bias
    # apply the chosen non-linearity: 'R' for ReLU, 'S' for sigmoid
    if activation == 'R':
        activation_next = activations.relu(output)
    elif activation == 'S':
        activation_next = activations.sigmoid(output)
    else:
        raise Exception('Unsupported activation!')
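The gist preview cuts off before the return statement; presumably the function hands back both the post-activation value and the pre-activation output, since the full forward pass below caches both. A hedged completion:

    return activation_next, output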
ksivaman / forward.py
Created July 13, 2019 21:02
Forward pass for all the layers.
def forward_pass(train_X, params_w, params_b, layers=[4, 5, 1], activate=['R', 'S']):
    num_layers = len(layers) - 1
    # caches of per-layer inputs and pre-activation outputs, reused by the backward pass
    activation_dict = {}
    output_dict = {}
    curr_act = train_X
    for index in range(num_layers):
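The preview is truncated at the loop header. A minimal sketch of how the body might continue, assuming one_layer_forward_pass returns (activation, pre-activation output) and that the cache keys "act<index>" and "out<layer>" are the ones read back during backpropagation:

        layer_num = index + 1
        prev_act = curr_act
        # this layer's parameters, keyed the same way as in param_updates
        curr_weight = params_w["weight" + str(layer_num)]
        curr_bias = params_b["bias" + str(layer_num)]
        # single-layer forward step
        curr_act, curr_out = one_layer_forward_pass(prev_act, curr_weight, curr_bias, activation=activate[index])
        # cache the layer input and pre-activation output for backprop
        activation_dict["act" + str(index)] = prev_act
        output_dict["out" + str(layer_num)] = curr_out
    return curr_act, activation_dict, output_dict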
ksivaman / param_init.py
Created July 13, 2019 21:11
Initialize network parameters
def init(layers=[4, 5, 1]):
    # fixed seed so every run starts from the same random parameters
    np.random.seed(42)
    params_w = {}
    params_b = {}
    for index in range(len(layers)-1):
        layer_num = index + 1
        in_layer_size = layers[index]
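The preview stops mid-loop. A hedged sketch of the rest, matching the "weight"/"bias" key names used in param_updates (the 0.1 scaling of the random draws is an assumption about the original choice):

        out_layer_size = layers[layer_num]
        # small random weights of shape (out, in) and biases of shape (out, 1)
        params_w["weight" + str(layer_num)] = np.random.randn(out_layer_size, in_layer_size) * 0.1
        params_b["bias" + str(layer_num)] = np.random.randn(out_layer_size, 1) * 0.1
    return params_w, params_b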
ksivaman / activations_and_derivatives.py
Created July 13, 2019 21:23
A collection of activation functions and their derivatives.
#sigmoid activation
def sigmoid(input):
    return 1 / (1 + np.exp(-input))

#relu activation
def relu(input):
    return np.maximum(input, 0)

#derivative of the sigmoid w.r.t. its input
def d_sigmoid(d_init, out):
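The derivative functions are cut off in the preview. A sketch of the standard forms, assuming d_init is the upstream gradient and out the pre-activation value, and that d_relu shares the same signature (the backward pass below calls either one interchangeably):

    sig = sigmoid(out)
    return d_init * sig * (1 - sig)

#derivative of relu w.r.t. input
def d_relu(d_init, out):
    d = np.array(d_init, copy=True)
    d[out <= 0] = 0
    return d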
ksivaman / one_layer_backward_pass.py
Created July 13, 2019 22:27
One-layer backward pass for feed-forward neural networks.
def one_layer_backward_pass(curr_grad, curr_weight, curr_bias, curr_out, prev_act, activation='R'):
    # how many samples are in the previous activations?
    num = prev_act.shape[1]
    # pick the derivative matching this layer's activation function
    if activation == 'R':
        d_act_func = activations.d_relu
    elif activation == 'S':
        d_act_func = activations.d_sigmoid
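The remainder of the gradient computation is cut off. A minimal sketch of the usual backprop algebra for one layer, consistent with the shapes used in the forward pass (hedged, not verbatim from the gist):

    # gradient w.r.t. the pre-activation output
    d_curr_out = d_act_func(curr_grad, curr_out)
    # parameter gradients, averaged over the batch
    d_curr_weight = np.dot(d_curr_out, prev_act.T) / num
    d_curr_bias = np.sum(d_curr_out, axis=1, keepdims=True) / num
    # gradient handed back to the previous layer's activations
    d_prev_act = np.dot(curr_weight.T, d_curr_out)
    return d_prev_act, d_curr_weight, d_curr_bias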
ksivaman / one_layer_backward_pass.py
Created July 13, 2019 23:05
A complete backward pass for the backpropagation algorithm
def backward_pass(y_pred, train_Y, activation_dict, output_dict, params_w, params_b, layers=[4, 5, 1], activate=['R', 'S']):
    gradients = {}
    num_samples = train_Y.shape[0]
    train_Y = train_Y.reshape(y_pred.shape)
    #derivative of the binary cross-entropy loss w.r.t. the predictions
    d_prev_act = - (np.divide(train_Y, y_pred) - np.divide(1 - train_Y, 1 - y_pred))
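The per-layer loop is missing from the preview. A hedged sketch that walks the layers in reverse, reusing the cache key names assumed in the forward-pass sketch above and the "d_weight"/"d_bias" gradient keys expected by param_updates:

    for index, activation in reversed(list(enumerate(activate))):
        layer_num = index + 1
        # cached values from the forward pass
        prev_act = activation_dict["act" + str(index)]
        curr_out = output_dict["out" + str(layer_num)]
        curr_weight = params_w["weight" + str(layer_num)]
        curr_bias = params_b["bias" + str(layer_num)]
        # propagate the gradient through this layer
        d_prev_act, d_weight, d_bias = one_layer_backward_pass(d_prev_act, curr_weight, curr_bias, curr_out, prev_act, activation)
        gradients["d_weight" + str(layer_num)] = d_weight
        gradients["d_bias" + str(layer_num)] = d_bias
    return gradients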
ksivaman / loss_and_accuracy.py
Created July 13, 2019 23:47
Utils for binary classification in a feed forward neural net.
#binary cross entropy loss
def cross_entropy_loss(y_pred, train_Y):
    num_samples = y_pred.shape[1]
    cost = -1 / num_samples * (np.dot(train_Y, np.log(y_pred).T) + np.dot(1 - train_Y, np.log(1 - y_pred).T))
    return np.squeeze(cost)

#convert probabilities to class predictions with threshold 0.5
def get_class_from_probs(probabilities):
    class_ = np.copy(probabilities)
    class_[class_ > 0.5] = 1
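The preview ends mid-function. A hedged completion of the thresholding, plus an accuracy helper the training loop is assumed to use (its name and exact form are guesses; the gist title only says "loss_and_accuracy"):

    class_[class_ <= 0.5] = 0
    return class_

#fraction of thresholded predictions that match the labels (assumed helper)
def accuracy(y_pred, train_Y):
    return np.mean(get_class_from_probs(y_pred) == train_Y)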
ksivaman / update_params.py
Created July 13, 2019 23:57
A function to update weights and biases for a feed forward neural network.
def param_updates(params_w, params_b, gradients, lr, layers=[4, 5, 1]):
    for index in range(len(layers) - 1):
        #gradient descent step: move each parameter against its gradient
        params_w["weight" + str(index + 1)] -= lr * gradients["d_weight" + str(index + 1)]
        params_b["bias" + str(index + 1)] -= lr * gradients["d_bias" + str(index + 1)]
    return params_w, params_b
ksivaman / train.py
Created July 14, 2019 00:17
Training loop for a simple feed forward neural network.
def train(train_X, train_Y, epochs, lr, layers=[4, 5, 1], activate=['R', 'S']):
    # initialization of neural network parameters
    params_w, params_b = init(layers)
    losses = []
    accuracies = []
    # performing calculations for subsequent iterations
    for i in range(epochs):
        # step forward
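The epoch body is truncated right after the "step forward" comment. A minimal sketch of one iteration, wired through the functions above (the accuracy helper is the assumed one sketched under loss_and_accuracy.py):

        y_pred, activation_dict, output_dict = forward_pass(train_X, params_w, params_b, layers, activate)
        # track loss and accuracy for this epoch
        losses.append(cross_entropy_loss(y_pred, train_Y))
        accuracies.append(accuracy(y_pred, train_Y))
        # step backward: gradients of the loss w.r.t. every weight and bias
        gradients = backward_pass(y_pred, train_Y, activation_dict, output_dict, params_w, params_b, layers, activate)
        # gradient descent update
        params_w, params_b = param_updates(params_w, params_b, gradients, lr, layers)
    return params_w, params_b, losses, accuracies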
ksivaman / imports.py
Created August 9, 2019 23:29
Imports for neural style transfer.
from PIL import Image
from io import BytesIO
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import time
import torch
import torch.optim as optim
import requests