Piotr Skalski (SkalskiP)
@SkalskiP
SkalskiP / update.py
Last active April 12, 2020 21:28
Updating parameter values
def update(params_values, grads_values, nn_architecture, learning_rate):
    # layers are numbered from 1, matching the "W1", "b1", ... keys used elsewhere
    for layer_idx, layer in enumerate(nn_architecture, 1):
        params_values["W" + str(layer_idx)] -= learning_rate * grads_values["dW" + str(layer_idx)]
        params_values["b" + str(layer_idx)] -= learning_rate * grads_values["db" + str(layer_idx)]
    return params_values
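
A hedged sketch of how update could fit together with the forward and backward passes from the gists below; the loop structure here is assumed, not copied from the original article:

def train(X, Y, nn_architecture, epochs, learning_rate):
    params_values = init_layers(nn_architecture)
    cost_history, accuracy_history = [], []
    for i in range(epochs):
        # forward pass, metric bookkeeping, backward pass, parameter update
        Y_hat, memory = full_forward_propagation(X, params_values, nn_architecture)
        cost_history.append(get_cost_value(Y_hat, Y))
        accuracy_history.append(get_accuracy_value(Y_hat, Y))
        grads_values = full_backward_propagation(Y_hat, Y, memory, params_values, nn_architecture)
        params_values = update(params_values, grads_values, nn_architecture, learning_rate)
    return params_values, cost_history, accuracy_history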
@SkalskiP
SkalskiP / full_backward_propagation.py
Created October 8, 2018 22:52
Full backward propagation
def full_backward_propagation(Y_hat, Y, memory, params_values, nn_architecture):
    grads_values = {}
    m = Y.shape[1]
    Y = Y.reshape(Y_hat.shape)
    # derivative of binary cross-entropy with respect to the predictions
    dA_prev = - (np.divide(Y, Y_hat) - np.divide(1 - Y, 1 - Y_hat))
    for layer_idx_prev, layer in reversed(list(enumerate(nn_architecture))):
        layer_idx_curr = layer_idx_prev + 1
        activ_function_curr = layer["activation"]
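        # the gist preview is truncated here; a hedged sketch of the rest of the loop,
        # assuming memory caches "A{i}" / "Z{i}" and single_layer_backward_propagation
        # returns (dA_prev, dW_curr, db_curr):
        dA_curr = dA_prev
        A_prev = memory["A" + str(layer_idx_prev)]
        Z_curr = memory["Z" + str(layer_idx_curr)]
        W_curr = params_values["W" + str(layer_idx_curr)]
        b_curr = params_values["b" + str(layer_idx_curr)]
        dA_prev, dW_curr, db_curr = single_layer_backward_propagation(
            dA_curr, W_curr, b_curr, Z_curr, A_prev, activ_function_curr)
        grads_values["dW" + str(layer_idx_curr)] = dW_curr
        grads_values["db" + str(layer_idx_curr)] = db_curr
    return grads_values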
@SkalskiP
SkalskiP / single_layer_backward_propagation.py
Last active October 8, 2018 22:47
Single layer backward propagation step
def single_layer_backward_propagation(dA_curr, W_curr, b_curr, Z_curr, A_prev, activation="relu"):
    m = A_prev.shape[1]
    if activation == "relu":
        backward_activation_func = relu_backward
    elif activation == "sigmoid":
        backward_activation_func = sigmoid_backward
    else:
        raise Exception('Non-supported activation function')
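    # truncated in the preview; presumably the gradients are computed along these
    # lines (a hedged sketch, not the author's exact code):
    dZ_curr = backward_activation_func(dA_curr, Z_curr)
    dW_curr = np.dot(dZ_curr, A_prev.T) / m
    db_curr = np.sum(dZ_curr, axis=1, keepdims=True) / m
    dA_prev = np.dot(W_curr.T, dZ_curr)
    return dA_prev, dW_curr, db_curr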
@SkalskiP
SkalskiP / get_cost_value.py
Last active April 20, 2020 07:11
Calculating the value of the cost function and accuracy
def get_cost_value(Y_hat, Y):
    m = Y_hat.shape[1]
    # binary cross-entropy averaged over the m examples
    cost = -1 / m * (np.dot(Y, np.log(Y_hat).T) + np.dot(1 - Y, np.log(1 - Y_hat).T))
    return np.squeeze(cost)

def get_accuracy_value(Y_hat, Y):
    Y_hat_ = convert_prob_into_class(Y_hat)
    return (Y_hat_ == Y).all(axis=0).mean()
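
convert_prob_into_class is referenced above but not shown in the preview; a minimal sketch of such a helper, assuming a 0.5 decision threshold:

def convert_prob_into_class(probs):
    # map sigmoid outputs to hard 0/1 class labels
    probs_ = np.copy(probs)
    probs_[probs_ > 0.5] = 1
    probs_[probs_ <= 0.5] = 0
    return probs_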
@SkalskiP
SkalskiP / full_forward_propagation.py
Last active October 8, 2018 22:48
Full forward propagation
def full_forward_propagation(X, params_values, nn_architecture):
    memory = {}
    A_curr = X
    for idx, layer in enumerate(nn_architecture):
        layer_idx = idx + 1
        A_prev = A_curr
        activ_function_curr = layer["activation"]
        W_curr = params_values["W" + str(layer_idx)]
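        # the preview cuts off here; a hedged sketch of the remaining loop body,
        # assuming single_layer_forward_propagation returns (A_curr, Z_curr):
        b_curr = params_values["b" + str(layer_idx)]
        A_curr, Z_curr = single_layer_forward_propagation(A_prev, W_curr, b_curr, activ_function_curr)
        # cache intermediate values for the backward pass
        memory["A" + str(idx)] = A_prev
        memory["Z" + str(layer_idx)] = Z_curr
    return A_curr, memory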
@SkalskiP
SkalskiP / single_layer_forward_propagation.py
Last active October 8, 2018 22:49
Single layer forward propagation step
def single_layer_forward_propagation(A_prev, W_curr, b_curr, activation="relu"):
    Z_curr = np.dot(W_curr, A_prev) + b_curr
    if activation == "relu":
        activation_func = relu
    elif activation == "sigmoid":
        activation_func = sigmoid
    else:
        raise Exception('Non-supported activation function')
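    # truncated in the preview; presumably the function returns both the activated
    # output and the pre-activation value needed later by backpropagation:
    return activation_func(Z_curr), Z_curr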
@SkalskiP
SkalskiP / activations.py
Created October 7, 2018 18:52
Activation functions
def sigmoid(Z):
    return 1 / (1 + np.exp(-Z))

def relu(Z):
    return np.maximum(0, Z)

def sigmoid_backward(dA, Z):
    sig = sigmoid(Z)
    return dA * sig * (1 - sig)
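
relu_backward is used by single_layer_backward_propagation but is missing from this preview; a minimal sketch of the usual definition:

def relu_backward(dA, Z):
    # gradient passes through only where the ReLU input was positive
    dZ = np.array(dA, copy=True)
    dZ[Z <= 0] = 0
    return dZ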
@SkalskiP
SkalskiP / nn_architecture.py
Created October 3, 2018 22:51
Example of neural network architecture
nn_architecture = [
    {"input_dim": 2, "output_dim": 4, "activation": "relu"},
    {"input_dim": 4, "output_dim": 6, "activation": "relu"},
    {"input_dim": 6, "output_dim": 6, "activation": "relu"},
    {"input_dim": 6, "output_dim": 4, "activation": "relu"},
    {"input_dim": 4, "output_dim": 1, "activation": "sigmoid"},
]
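
Each layer's output_dim must match the next layer's input_dim, so the stack maps a 2-feature input to a single sigmoid output. A quick sanity check (a hypothetical helper, not part of the original gists):

def validate_architecture(nn_architecture):
    # every layer must consume exactly what the previous layer produces
    for prev, curr in zip(nn_architecture, nn_architecture[1:]):
        assert prev["output_dim"] == curr["input_dim"]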
@SkalskiP
SkalskiP / init_layers.py
Last active October 8, 2018 22:49
Initialization of parameter values for each layer
def init_layers(nn_architecture, seed=99):
    np.random.seed(seed)
    number_of_layers = len(nn_architecture)
    params_values = {}
    for idx, layer in enumerate(nn_architecture):
        layer_idx = idx + 1
        layer_input_size = layer["input_dim"]
        layer_output_size = layer["output_dim"]
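        # the preview stops here; presumably the weights and biases are drawn from a
        # small random normal distribution and stored under "W{i}" / "b{i}" keys:
        params_values["W" + str(layer_idx)] = np.random.randn(layer_output_size, layer_input_size) * 0.1
        params_values["b" + str(layer_idx)] = np.random.randn(layer_output_size, 1) * 0.1
    return params_values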
# Adding callback functions that will run on each epoch
testmodelcb = keras.callbacks.LambdaCallback(on_epoch_end=save_model_prediction_graph)
# Compilation of the model
model.compile(loss='binary_crossentropy', optimizer='adamax', metrics=['accuracy'])
# Model training
model.fit(X_train, y_train, epochs=N_EPOCHS, verbose=0, callbacks=[testmodelcb])
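
This Keras snippet assumes model, X_train, y_train, N_EPOCHS and the save_model_prediction_graph callback are defined elsewhere; a hedged, illustrative sketch of the kind of setup it could run against (the layer sizes mirror nn_architecture above, the data and callback body are placeholders, not the author's code):

import numpy as np
import keras
from keras.models import Sequential
from keras.layers import Dense

N_EPOCHS = 200  # illustrative value

X_train = np.random.rand(1000, 2)                 # placeholder training data
y_train = (X_train.sum(axis=1) > 1).astype(int)   # placeholder labels

# layer sizes mirror nn_architecture above; the original model may differ
model = Sequential([
    Dense(4, input_dim=2, activation='relu'),
    Dense(6, activation='relu'),
    Dense(6, activation='relu'),
    Dense(4, activation='relu'),
    Dense(1, activation='sigmoid'),
])

def save_model_prediction_graph(epoch, logs):
    # placeholder: the original callback saves a plot of the model's predictions each epoch
    pass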