import numpy as np


def batchnorm_forward(x, gamma, beta, bn_param):
""" | |
Forward pass for batch normalization. | |
During training the sample mean and (uncorrected) sample variance are | |
computed from minibatch statistics and used to normalize the incoming data. | |
During training we also keep an exponentially decaying running mean of the | |
mean and variance of each feature, and these averages are used to normalize | |
data at test-time. | |
At each timestep we update the running averages for mean and variance using | |
an exponential decay based on the momentum parameter: | |
running_mean = momentum * running_mean + (1 - momentum) * sample_mean | |
running_var = momentum * running_var + (1 - momentum) * sample_var | |
Note that the batch normalization paper suggests a different test-time | |
behavior: they compute sample mean and variance for each feature using a | |
large number of training images rather than using a running average. For | |
this implementation we have chosen to use running averages instead since | |
they do not require an additional estimation step; the torch7 | |
implementation of batch normalization also uses running averages. | |
Input: | |
- x: Data of shape (N, D) | |
- gamma: Scale parameter of shape (D,) | |
- beta: Shift paremeter of shape (D,) | |
- bn_param: Dictionary with the following keys: | |
- mode: 'train' or 'test'; required | |
- eps: Constant for numeric stability | |
- momentum: Constant for running mean / variance. | |
- running_mean: Array of shape (D,) giving running mean of features | |
- running_var Array of shape (D,) giving running variance of features | |
Returns a tuple of: | |
- out: of shape (N, D) | |
- cache: A tuple of values needed in the backward pass | |
""" | |
    mode = bn_param["mode"]
    eps = bn_param.get("eps", 1e-5)
    momentum = bn_param.get("momentum", 0.9)

    N, D = x.shape
    running_mean = bn_param.get("running_mean", np.zeros(D, dtype=x.dtype))
    running_var = bn_param.get("running_var", np.zeros(D, dtype=x.dtype))

    out, cache = None, None
    if mode == "train":
        #######################################################################
        # TODO: Implement the training-time forward pass for batch norm.      #
        # Use minibatch statistics to compute the mean and variance, use      #
        # these statistics to normalize the incoming data, and scale and      #
        # shift the normalized data using gamma and beta.                     #
        #                                                                     #
        # You should store the output in the variable out. Any intermediates  #
        # that you need for the backward pass should be stored in the cache   #
        # variable.                                                           #
        #                                                                     #
        # You should also use your computed sample mean and variance together #
        # with the momentum variable to update the running mean and running   #
        # variance, storing your result in the running_mean and running_var   #
        # variables.                                                          #
        #                                                                     #
        # Note that though you should be keeping track of the running         #
        # variance, you should normalize the data based on the standard       #
        # deviation (square root of variance) instead!                        #
        # Referencing the original paper (https://arxiv.org/abs/1502.03167)   #
        # might prove to be helpful.                                          #
        #######################################################################
        # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
        # Step 1, mu (D,): per-feature batch mean
        mu = 1 / N * np.sum(x, axis=0)
        # Step 2, xmu (N, D): data centered around the batch mean
        xmu = x - mu
        # Step 3, carre (N, D): squared deviations
        carre = xmu ** 2
        # Step 4, var (D,): per-feature (uncorrected) batch variance
        var = 1 / N * np.sum(carre, axis=0)
        # Step 5, sqrtvar (D,): standard deviation, with eps for stability
        sqrtvar = np.sqrt(var + eps)
        # Step 6, invvar (D,): inverse standard deviation
        invvar = 1 / sqrtvar
        # Step 7, xnorm (N, D): normalized data
        xnorm = xmu * invvar
        # Step 8, out (N, D): scale and shift the normalized data
        out = xnorm * gamma + beta

        # Update the running averages using the batch statistics
        running_mean = momentum * running_mean + (1 - momentum) * mu
        running_var = momentum * running_var + (1 - momentum) * var

        cache = (eps, gamma, x, mu, xmu, carre, var, sqrtvar, invvar, xnorm)
        # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
        #######################################################################
        #                           END OF YOUR CODE                          #
        #######################################################################
    elif mode == "test":
        #######################################################################
        # TODO: Implement the test-time forward pass for batch normalization. #
        # Use the running mean and variance to normalize the incoming data,   #
        # then scale and shift the normalized data using gamma and beta.      #
        # Store the result in the out variable.                               #
        #######################################################################
        # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
        # Normalize with the running statistics; eps belongs inside the sqrt,
        # matching the training-time computation.
        out = (x - running_mean) / np.sqrt(running_var + eps) * gamma + beta
        # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
        #######################################################################
        #                           END OF YOUR CODE                          #
        #######################################################################
    else:
        raise ValueError('Invalid forward batchnorm mode "%s"' % mode)

    # Store the updated running means back into bn_param
    bn_param["running_mean"] = running_mean
    bn_param["running_var"] = running_var

    return out, cache
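

# Not part of the original gist: a minimal usage sketch of the forward pass.
# It assumes only NumPy and the batchnorm_forward function above; the helper
# name _batchnorm_forward_demo and the toy shapes (N=4, D=3) are made up for
# illustration. Call _batchnorm_forward_demo() to try it.
def _batchnorm_forward_demo():
    """Toy train-mode and test-mode forward passes on random data."""
    rng = np.random.RandomState(0)
    x = rng.randn(4, 3)
    gamma = np.ones(3)
    beta = np.zeros(3)
    bn_param = {"mode": "train"}

    # Train mode: normalize with batch statistics and update the running
    # mean / variance stored inside bn_param.
    out_train, _ = batchnorm_forward(x, gamma, beta, bn_param)
    print("train out shape:", out_train.shape)  # (4, 3)
    print("per-feature mean (should be ~0):", out_train.mean(axis=0))
    print("per-feature std  (should be ~1):", out_train.std(axis=0))

    # Test mode: reuse the running statistics accumulated during training.
    bn_param["mode"] = "test"
    out_test, _ = batchnorm_forward(x, gamma, beta, bn_param)
    print("test out shape:", out_test.shape)  # (4, 3)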


def batchnorm_backward(dout, cache):
    """
    Backward pass for batch normalization.

    For this implementation, you should write out a computation graph for
    batch normalization on paper and propagate gradients backward through
    intermediate nodes.

    Inputs:
    - dout: Upstream derivatives, of shape (N, D)
    - cache: Variable of intermediates from batchnorm_forward.

    Returns a tuple of:
    - dx: Gradient with respect to inputs x, of shape (N, D)
    - dgamma: Gradient with respect to scale parameter gamma, of shape (D,)
    - dbeta: Gradient with respect to shift parameter beta, of shape (D,)
    """
    dx, dgamma, dbeta = None, None, None
    ###########################################################################
    # TODO: Implement the backward pass for batch normalization. Store the    #
    # results in the dx, dgamma, and dbeta variables.                         #
    # Referencing the original paper (https://arxiv.org/abs/1502.03167)       #
    # might prove to be helpful.                                              #
    ###########################################################################
    # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
    # References:
    # https://www.cnblogs.com/bernieloveslife/p/10189369.html
    # https://github.com/cthorey/CS231/blob/master/assignment2/cs231n/layers.py#L154
    N, D = dout.shape
    (eps, gamma, x, mu, xmu, carre, var, sqrtvar, invvar, xnorm) = cache
    # Gradients of the shift and scale parameters.
    dbeta = np.sum(dout, axis=0)
    dgamma = np.sum(dout * xnorm, axis=0)
    # Backprop through out = xnorm * gamma + beta.
    dxnorm = dout * gamma
    # Backprop through xnorm = xmu * invvar (invvar branch).
    dinvvar = np.sum(xmu * dxnorm, axis=0)
    # Backprop through invvar = 1 / sqrtvar.
    dsqrtvar = -1 / (sqrtvar ** 2) * dinvvar
    # Backprop through sqrtvar = sqrt(var + eps).
    dvar = 1 / 2 * 1 / np.sqrt(var + eps) * dsqrtvar
    # Backprop through var = 1/N * sum(carre, axis=0).
    dcarre = 1 / N * np.ones((N, D)) * dvar
    # Backprop through xnorm = xmu * invvar (xmu branch) and carre = xmu**2.
    dxmu = invvar * dxnorm
    dxmu += 2 * xmu * dcarre
    # Backprop through xmu = x - mu, with mu = 1/N * sum(x, axis=0).
    dx = dxmu
    dmu = np.sum(-dxmu, axis=0)
    dx += 1 / N * np.ones((N, D)) * dmu
    # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
    ###########################################################################
    #                             END OF YOUR CODE                            #
    ###########################################################################
    return dx, dgamma, dbeta
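

# Not part of the original gist: a rough numerical gradient check for the
# backward pass, using central differences. It assumes only NumPy and the two
# functions above; the helper names _num_grad and _batchnorm_gradcheck_demo
# are made up for illustration.
def _num_grad(f, x, dout, h=1e-5):
    """Central-difference estimate of d(sum(f(x) * dout)) / dx."""
    grad = np.zeros_like(x)
    for idx in np.ndindex(*x.shape):
        orig = x[idx]
        x[idx] = orig + h
        pos = f(x)
        x[idx] = orig - h
        neg = f(x)
        x[idx] = orig
        grad[idx] = np.sum((pos - neg) * dout) / (2 * h)
    return grad


def _batchnorm_gradcheck_demo():
    """Compare analytic gradients from batchnorm_backward to numeric ones."""
    rng = np.random.RandomState(1)
    N, D = 4, 5
    x = rng.randn(N, D)
    gamma = rng.randn(D)
    beta = rng.randn(D)
    dout = rng.randn(N, D)

    _, cache = batchnorm_forward(x, gamma, beta, {"mode": "train"})
    dx, dgamma, dbeta = batchnorm_backward(dout, cache)

    # Fresh bn_param on every call so running averages do not accumulate.
    fx = lambda v: batchnorm_forward(v, gamma, beta, {"mode": "train"})[0]
    fg = lambda v: batchnorm_forward(x, v, beta, {"mode": "train"})[0]
    fb = lambda v: batchnorm_forward(x, gamma, v, {"mode": "train"})[0]

    # Errors should be small (roughly 1e-8 or below).
    print("dx error:    ", np.max(np.abs(dx - _num_grad(fx, x, dout))))
    print("dgamma error:", np.max(np.abs(dgamma - _num_grad(fg, gamma, dout))))
    print("dbeta error: ", np.max(np.abs(dbeta - _num_grad(fb, beta, dout))))


if __name__ == "__main__":
    _batchnorm_gradcheck_demo()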