Skip to content

Instantly share code, notes, and snippets.

@angadsinghsandhu
Last active December 1, 2020 16:03
Show Gist options
  • Save angadsinghsandhu/79720b658b35866d358314071d754ede to your computer and use it in GitHub Desktop.
Backward Propagation
# Backward Propagation Logic
def backprop(self, y_hat, y):
    """Backpropagate the squared-error loss and apply one gradient step.

    Expects per-layer attributes on ``self``: ``z0`` (the raw input),
    ``z1..zJ`` (pre-activations), ``a1..a{J-1}`` (activations),
    ``w1..wJ`` / ``b1..bJ`` (weights / biases), plus ``num_layers``,
    ``alpha`` (learning rate) and ``d_sigmoid`` (activation derivative).

    Side effects: sets ``self.dz{i}`` / ``self.dw{i}`` and updates every
    ``self.w{i}`` and ``self.b{i}`` in place. Returns None.

    NOTE(review): attributes were originally addressed via exec() on
    format strings; rewritten with getattr/setattr — same attributes,
    no dynamic code execution.
    """
    j = self.num_layers

    # Output-layer delta: chain rule on loss (y_hat - y)^2 through sigmoid,
    # i.e. dz_j = 2*(y_hat - y) * sigmoid'(z_j).
    dz = 2 * (y_hat - y) * self.d_sigmoid(getattr(self, "z{}".format(j)))
    setattr(self, "dz{}".format(j), dz)

    # Propagate deltas backwards through the hidden layers:
    # dz_i = sigmoid'(z_i) * (w_{i+1}.T @ dz_{i+1})
    for i in range(j - 1, 0, -1):
        w_next = getattr(self, "w{}".format(i + 1))
        dz_next = getattr(self, "dz{}".format(i + 1))
        dz = self.d_sigmoid(getattr(self, "z{}".format(i))) * np.dot(w_next.T, dz_next)
        setattr(self, "dz{}".format(i), dz)

    # Weight gradients and updates for layers j..2 (dW_i = dz_i @ a_{i-1}.T).
    for i in range(j, 1, -1):
        dzi = getattr(self, "dz{}".format(i))
        a_prev = getattr(self, "a{}".format(i - 1))
        dw = np.dot(dzi, a_prev.T)
        setattr(self, "dw{}".format(i), dw)
        setattr(self, "w{}".format(i), getattr(self, "w{}".format(i)) - self.alpha * dw)

    # First layer uses the raw input z0 instead of an activation.
    # BUG FIX: the original overwrote this update command with the bias
    # commands before exec'ing it, so w1 was never actually updated.
    self.dw1 = np.dot(self.dz1, self.z0.T)
    self.w1 = self.w1 - self.alpha * self.dw1

    # Bias updates: db_i = dz_i for every layer.
    for i in range(j, 0, -1):
        b_new = getattr(self, "b{}".format(i)) - self.alpha * getattr(self, "dz{}".format(i))
        setattr(self, "b{}".format(i), b_new)
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment