@SkalskiP
Created October 8, 2018 22:52
Full backward propagation
import numpy as np


def full_backward_propagation(Y_hat, Y, memory, params_values, nn_architecture):
    grads_values = {}
    m = Y.shape[1]  # number of examples

    # make sure the label vector has the same shape as the prediction vector
    Y = Y.reshape(Y_hat.shape)

    # gradient of binary cross-entropy loss with respect to Y_hat
    dA_prev = - (np.divide(Y, Y_hat) - np.divide(1 - Y, 1 - Y_hat))

    # walk through the layers in reverse order
    for layer_idx_prev, layer in reversed(list(enumerate(nn_architecture))):
        layer_idx_curr = layer_idx_prev + 1
        activ_function_curr = layer["activation"]

        dA_curr = dA_prev

        # values cached during forward propagation
        A_prev = memory["A" + str(layer_idx_prev)]
        Z_curr = memory["Z" + str(layer_idx_curr)]
        W_curr = params_values["W" + str(layer_idx_curr)]
        b_curr = params_values["b" + str(layer_idx_curr)]

        dA_prev, dW_curr, db_curr = single_layer_backward_propagation(
            dA_curr, W_curr, b_curr, Z_curr, A_prev, activ_function_curr)

        grads_values["dW" + str(layer_idx_curr)] = dW_curr
        grads_values["db" + str(layer_idx_curr)] = db_curr

    return grads_values
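
For context, below is a minimal usage sketch (not part of the original gist). It assumes the conventions used elsewhere in this gist series: W{l} has shape (output_dim, input_dim), b{l} has shape (output_dim, 1), the memory dict caches A{l-1} and Z{l} from the forward pass, and single_layer_backward_propagation returns (dA_prev, dW, db). The sigmoid helpers and the single_layer_backward_propagation stand-in shown here are assumptions added only to make the example self-contained; the hypothetical 2-3-1 architecture and random data are illustrative.

import numpy as np

def sigmoid(Z):
    return 1 / (1 + np.exp(-Z))

def sigmoid_backward(dA, Z):
    s = sigmoid(Z)
    return dA * s * (1 - s)

def single_layer_backward_propagation(dA_curr, W_curr, b_curr, Z_curr, A_prev, activation):
    # assumed signature; only the sigmoid case is handled in this sketch
    m = A_prev.shape[1]
    dZ_curr = sigmoid_backward(dA_curr, Z_curr)
    dW_curr = np.dot(dZ_curr, A_prev.T) / m
    db_curr = np.sum(dZ_curr, axis=1, keepdims=True) / m
    dA_prev = np.dot(W_curr.T, dZ_curr)
    return dA_prev, dW_curr, db_curr

# hypothetical 2-3-1 network using sigmoid activations throughout
nn_architecture = [
    {"input_dim": 2, "output_dim": 3, "activation": "sigmoid"},
    {"input_dim": 3, "output_dim": 1, "activation": "sigmoid"},
]

rng = np.random.default_rng(0)
params_values = {
    "W1": rng.standard_normal((3, 2)) * 0.1, "b1": np.zeros((3, 1)),
    "W2": rng.standard_normal((1, 3)) * 0.1, "b2": np.zeros((1, 1)),
}

X = rng.standard_normal((2, 5))       # 5 training examples, 2 features each
Y = rng.integers(0, 2, size=(1, 5))   # binary labels

# forward pass, caching intermediate values the way full_backward_propagation expects
memory = {"A0": X}
A = X
for idx, layer in enumerate(nn_architecture, start=1):
    Z = np.dot(params_values["W" + str(idx)], A) + params_values["b" + str(idx)]
    A = sigmoid(Z)
    memory["Z" + str(idx)] = Z
    memory["A" + str(idx)] = A

grads = full_backward_propagation(A, Y, memory, params_values, nn_architecture)
print(sorted(grads.keys()))  # ['dW1', 'dW2', 'db1', 'db2']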