Train ResNet-50 + LayerNorm
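This script trains a ResNet-50 on ImageNet with the BatchNorm layers swapped for a channels-first LayerNorm, using one process per GPU under DistributedDataParallel. Since it parses a --local_rank argument and calls dist.init_process_group(backend='nccl') with the default environment-variable initialization, it is presumably meant to be started with the legacy per-node launcher, along the lines of (train.py is a placeholder filename):

python -m torch.distributed.launch --nproc_per_node=<NUM_GPUS> train.py --data /path/to/imagenet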
import argparse
import os
import random
import shutil
import time
import warnings

import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models

import models.Res as Resnet  # local ResNet definitions that accept a custom norm_layer
model_names = sorted(name for name in models.__dict__
                     if name.islower() and not name.startswith("__")
                     and callable(models.__dict__[name]))
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('--data',
                    metavar='DIR',
                    default='/dockerdata/imagenet',
                    help='path to dataset')
parser.add_argument('-a',
                    '--arch',
                    metavar='ARCH',
                    default='resnet50',
                    choices=model_names,
                    help='model architecture: ' + ' | '.join(model_names) +
                    ' (default: resnet50)')
parser.add_argument('-j',
                    '--workers',
                    default=8,
                    type=int,
                    metavar='N',
                    help='number of data loading workers (default: 8)')
parser.add_argument('--epochs',
                    default=100,  # 90
                    type=int,
                    metavar='N',
                    help='number of total epochs to run')
parser.add_argument('--start-epoch',
                    default=0,
                    type=int,
                    metavar='N',
                    help='manual epoch number (useful on restarts)')
parser.add_argument('-b',
                    '--batch-size',
                    default=256,  # 1024
                    type=int,
                    metavar='N',
                    help='mini-batch size (default: 256); this is the total '
                    'batch size of all GPUs on the current node when '
                    'using Data Parallel or Distributed Data Parallel')
parser.add_argument('--lr',
                    '--learning-rate',
                    default=0.1,  # 0.4
                    type=float,
                    metavar='LR',
                    help='initial learning rate',
                    dest='lr')
parser.add_argument('--momentum',
                    default=0.9,
                    type=float,
                    metavar='M',
                    help='momentum')
parser.add_argument('--local_rank',
                    default=-1,
                    type=int,
                    help='local (per-node) process rank for distributed training')
parser.add_argument('--wd',
                    '--weight-decay',
                    default=1e-4,
                    type=float,
                    metavar='W',
                    help='weight decay (default: 1e-4)',
                    dest='weight_decay')
parser.add_argument('-p',
                    '--print-freq',
                    default=10,
                    type=int,
                    metavar='N',
                    help='print frequency (default: 10)')
parser.add_argument('-e',
                    '--evaluate',
                    dest='evaluate',
                    action='store_true',
                    help='evaluate model on validation set')
parser.add_argument('--pretrained',
                    dest='pretrained',
                    action='store_true',
                    help='use pre-trained model')
parser.add_argument('--seed',
                    default=None,
                    type=int,
                    help='seed for initializing training')
class LayerNorm2d(nn.LayerNorm):
    """LayerNorm over the channel dimension of 2D spatial NCHW tensors."""

    def __init__(self, num_channels, eps=1e-6, affine=True):
        super().__init__(num_channels, eps=eps, elementwise_affine=affine)

    def forward(self, x):
        # Move channels last so layer_norm normalizes over C, then restore NCHW.
        x = x.permute(0, 2, 3, 1)
        x = torch.nn.functional.layer_norm(
            x, self.normalized_shape, self.weight, self.bias, self.eps)
        x = x.permute(0, 3, 1, 2)
        return x
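# Sanity check (illustrative only, not executed during training): for
# x = torch.randn(2, 64, 8, 8), y = LayerNorm2d(64)(x) has zero mean and unit
# variance (up to eps) over the 64 channels at every spatial position, because
# layer_norm acts on the trailing channel axis after the NHWC permute.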
def ln_helper(planes):
    # Norm-layer factory handed to the ResNet constructor; alternatives tried:
    # return nn.GroupNorm(32, planes)  # 32 is the number of groups
    # return nn.InstanceNorm2d(planes, affine=True)
    return LayerNorm2d(planes)
def reduce_mean(tensor, nprocs):
    """Average a tensor across all distributed processes."""
    rt = tensor.clone()
    dist.all_reduce(rt, op=dist.ReduceOp.SUM)
    rt /= nprocs
    return rt
def main():
    args = parser.parse_args()
    args.nprocs = torch.cuda.device_count()
    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')
    main_worker(args.local_rank, args.nprocs, args)
def main_worker(local_rank, nprocs, args):
    best_acc1 = 0.0
    dist.init_process_group(backend='nccl')
    # create model
    if args.pretrained:
        print("=> using pre-trained model '{}'".format(args.arch))
        model = models.__dict__[args.arch](pretrained=True)
    else:
        print("=> creating model '{}'".format(args.arch))
        # model = models.__dict__[args.arch]()
        model = Resnet.__dict__[args.arch](pretrained=False, norm_layer=ln_helper)
    print(model)
    torch.cuda.set_device(local_rank)
    model.cuda(local_rank)
    # When using a single GPU per process and per
    # DistributedDataParallel, we need to divide the batch size
    # ourselves based on the total number of GPUs we have
    args.batch_size = int(args.batch_size / nprocs)
    model = torch.nn.parallel.DistributedDataParallel(model,
                                                      device_ids=[local_rank])
    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda(local_rank)
    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    cudnn.benchmark = True
    # Data loading code
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    train_dataset = datasets.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            # transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.2),
            transforms.ToTensor(),
            normalize,
        ]))
    train_sampler = torch.utils.data.distributed.DistributedSampler(
        train_dataset)
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               num_workers=2,  # note: hardcoded; the --workers flag is not used
                                               pin_memory=True,
                                               sampler=train_sampler)
    val_dataset = datasets.ImageFolder(
        valdir,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ]))
    val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset)
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=args.batch_size,
                                             num_workers=2,
                                             pin_memory=True,
                                             sampler=val_sampler)
    if args.evaluate:
        validate(val_loader, model, criterion, local_rank, args)
        return
    for epoch in range(args.start_epoch, args.epochs):
        train_sampler.set_epoch(epoch)
        val_sampler.set_epoch(epoch)
        adjust_learning_rate(optimizer, epoch, args)
        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch, local_rank,
              args)
        # evaluate on validation set
        acc1 = validate(val_loader, model, criterion, local_rank, args)
        # remember best acc@1 and save checkpoint
        is_best = acc1 > best_acc1
        best_acc1 = max(acc1, best_acc1)
        if args.local_rank == 0:
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'arch': args.arch,
                    'state_dict': model.module.state_dict(),
                    'best_acc1': best_acc1,
                },
                is_best,
                filename="/apdcephfs/private_huberyniu/cli_pretrained_models/my_trained_ln_resnets/resnet50/checkpoint.pth.tar")
            # filename="/apdcephfs/private_huberyniu/cli_pretrained_models/my_trained_gn_resnets/resnet50-v3/checkpoint.pth.tar"
def train(train_loader, model, criterion, optimizer, epoch, local_rank, args):
    batch_time = AverageMeter('Time', ':6.3f')
    data_time = AverageMeter('Data', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    progress = ProgressMeter(len(train_loader),
                             [batch_time, data_time, losses, top1, top5],
                             prefix="Epoch: [{}]".format(epoch))
    # switch to train mode
    model.train()
    end = time.time()
    for i, (images, target) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)
        images = images.cuda(local_rank, non_blocking=True)
        target = target.cuda(local_rank, non_blocking=True)
        # compute output
        output = model(images)
        loss = criterion(output, target)
        # measure accuracy and record loss
        acc1, acc5 = accuracy(output, target, topk=(1, 5))
        torch.distributed.barrier()
        reduced_loss = reduce_mean(loss, args.nprocs)
        reduced_acc1 = reduce_mean(acc1, args.nprocs)
        reduced_acc5 = reduce_mean(acc5, args.nprocs)
        losses.update(reduced_loss.item(), images.size(0))
        top1.update(reduced_acc1.item(), images.size(0))
        top5.update(reduced_acc5.item(), images.size(0))
        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if i % args.print_freq == 0:
            progress.display(i)
def validate(val_loader, model, criterion, local_rank, args):
    batch_time = AverageMeter('Time', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    progress = ProgressMeter(len(val_loader), [batch_time, losses, top1, top5],
                             prefix='Test: ')
    # switch to evaluate mode
    model.eval()
    with torch.no_grad():
        end = time.time()
        for i, (images, target) in enumerate(val_loader):
            images = images.cuda(local_rank, non_blocking=True)
            target = target.cuda(local_rank, non_blocking=True)
            # compute output
            output = model(images)
            loss = criterion(output, target)
            # measure accuracy and record loss
            acc1, acc5 = accuracy(output, target, topk=(1, 5))
            torch.distributed.barrier()
            reduced_loss = reduce_mean(loss, args.nprocs)
            reduced_acc1 = reduce_mean(acc1, args.nprocs)
            reduced_acc5 = reduce_mean(acc5, args.nprocs)
            losses.update(reduced_loss.item(), images.size(0))
            top1.update(reduced_acc1.item(), images.size(0))
            top5.update(reduced_acc5.item(), images.size(0))
            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            if i % args.print_freq == 0:
                progress.display(i)
        # TODO: this should also be done with the ProgressMeter
        print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'.format(top1=top1,
                                                                    top5=top5))
    return top1.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    torch.save(state, filename)
    if is_best:
        shutil.copyfile(filename, '/apdcephfs/private_huberyniu/cli_pretrained_models/my_trained_ln_resnets/resnet50/model_best.pth.tar')
        # '/apdcephfs/private_huberyniu/cli_pretrained_models/my_trained_gn_resnets/resnet50-v3/model_best.pth.tar'
class AverageMeter(object):
    """Computes and stores the average and current value"""

    def __init__(self, name, fmt=':f'):
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def __str__(self):
        fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
        return fmtstr.format(**self.__dict__)
class ProgressMeter(object):
    def __init__(self, num_batches, meters, prefix=""):
        self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
        self.meters = meters
        self.prefix = prefix

    def display(self, batch):
        entries = [self.prefix + self.batch_fmtstr.format(batch)]
        entries += [str(meter) for meter in self.meters]
        print('\t'.join(entries))

    def _get_batch_fmtstr(self, num_batches):
        num_digits = len(str(num_batches))
        fmt = '{:' + str(num_digits) + 'd}'
        return '[' + fmt + '/' + fmt.format(num_batches) + ']'
def adjust_learning_rate(optimizer, epoch, args):
    """Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
    lr = args.lr * (0.1**(epoch // 30))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
def accuracy(output, target, topk=(1, )):
    """Computes the accuracy over the k top predictions for the specified values of k"""
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)
        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))
        res = []
        for k in topk:
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(100.0 / batch_size))
        return res
if __name__ == '__main__':
    main()
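Because rank 0 saves model.module.state_dict() (the unwrapped weights), a saved checkpoint can later be loaded into a plain, non-DDP model. A minimal sketch, assuming the same models.Res module is importable and the path is adjusted to wherever the checkpoint was written:

ckpt = torch.load('checkpoint.pth.tar', map_location='cpu')
model = Resnet.__dict__[ckpt['arch']](pretrained=False, norm_layer=ln_helper)
model.load_state_dict(ckpt['state_dict'])
print('epoch:', ckpt['epoch'], 'best_acc1:', ckpt['best_acc1'])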