Adaptive learning rate example from ImageNet
# https://github.com/pytorch/examples/blob/master/imagenet/main.py#L392-L396
def adjust_learning_rate(optimizer, epoch, args):
    """Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
    lr = args.lr * (0.1 ** (epoch // 30))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
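The same decay-by-10-every-30-epochs policy can also be expressed with PyTorch's built-in torch.optim.lr_scheduler.StepLR. A minimal sketch; the dummy model and base lr below are placeholders, not values from the ImageNet example:

# Sketch: equivalent schedule via StepLR (dummy model and lr=0.1 are placeholder choices)
import torch
import torch.nn as nn

model = nn.Linear(10, 2)  # placeholder model
optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=30, gamma=0.1)

for epoch in range(90):
    # ... train for one epoch with `optimizer` here ...
    scheduler.step()  # lr: 0.1 for epochs 0-29, 0.01 for 30-59, 0.001 for 60-89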
# Create a learning rate adjustment function that divides the learning rate by 10 every 30 epochs
# (the optimizer must be passed in, since its param_groups are updated in place)
def adjust_learning_rate(optimizer, epoch):
    lr = 0.001
    if epoch > 180:
        lr = lr / 1000000
    elif epoch > 150:
        lr = lr / 100000
    elif epoch > 120:
        lr = lr / 10000
    elif epoch > 90:
        lr = lr / 1000
    elif epoch > 60:
        lr = lr / 100
    elif epoch > 30:
        lr = lr / 10
    for param_group in optimizer.param_groups:
        param_group["lr"] = lr
# usage: model, criterion, optimizer, dataloader, num_epochs, and args are assumed to be defined
for epoch in range(num_epochs):
    # adjust the lr once per epoch
    adjust_learning_rate(optimizer, epoch, args)
    for X, Y in dataloader:
        # zero out gradients from the previous step
        optimizer.zero_grad()
        # forward pass
        Y_pred = model(X)
        # compute loss
        loss = criterion(Y_pred, Y)
        # backward pass
        loss.backward()
        # clip gradients to a maximum L2 norm (clip_grad_norm_ is the in-place,
        # non-deprecated variant of clip_grad_norm)
        nn.utils.clip_grad_norm_(model.parameters(), max_norm=5.0)
        # update params
        optimizer.step()
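If one of the built-in schedulers sketched above is used instead of the hand-rolled function, the loop stays the same except that scheduler.step() replaces the manual call; model, criterion, optimizer, dataloader, scheduler, and num_epochs are assumed to be defined as before:

# Sketch: the same loop driven by a torch.optim.lr_scheduler scheduler
for epoch in range(num_epochs):
    for X, Y in dataloader:
        optimizer.zero_grad()
        Y_pred = model(X)
        loss = criterion(Y_pred, Y)
        loss.backward()
        nn.utils.clip_grad_norm_(model.parameters(), max_norm=5.0)
        optimizer.step()
    # step the scheduler once per epoch, after the optimizer updates (PyTorch >= 1.1 ordering)
    scheduler.step()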