import argparse
import os
import random
import shutil
import time
import warnings

import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
from torch.utils.data import TensorDataset
import torch.nn.functional as F
model_names = sorted(name for name in models.__dict__
                     if name.islower() and not name.startswith("__")
                     and callable(models.__dict__[name]))

parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('data', metavar='DIR',
                    help='path to dataset')
parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet18',
                    choices=model_names,
                    help='model architecture: ' +
                         ' | '.join(model_names) +
                         ' (default: resnet18)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
                    help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=90, type=int, metavar='N',
                    help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
                    help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
                    metavar='N',
                    help='mini-batch size (default: 256), this is the total '
                         'batch size of all GPUs on the current node when '
                         'using Data Parallel or Distributed Data Parallel')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
                    metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
                    help='momentum')
parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
                    metavar='W', help='weight decay (default: 1e-4)',
                    dest='weight_decay')
parser.add_argument('-p', '--print-freq', default=10, type=int,
                    metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
                    help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
                    help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
                    help='use pre-trained model')
parser.add_argument('--world-size', default=-1, type=int,
                    help='number of nodes for distributed training')
parser.add_argument('--rank', default=-1, type=int,
                    help='node rank for distributed training')
parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,
                    help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
                    help='distributed backend')
parser.add_argument('--seed', default=None, type=int,
                    help='seed for initializing training.')
parser.add_argument('--gpu', default=None, type=int,
                    help='GPU id to use.')
parser.add_argument('--multiprocessing-distributed', action='store_true',
                    help='Use multi-processing distributed training to launch '
                         'N processes per node, which has N GPUs. This is the '
                         'fastest way to use PyTorch for either single node or '
                         'multi node data parallel training')

best_acc1 = 0
def main():
    args = parser.parse_args()

    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')

    if args.gpu is not None:
        warnings.warn('You have chosen a specific GPU. This will completely '
                      'disable data parallelism.')

    if args.dist_url == "env://" and args.world_size == -1:
        args.world_size = int(os.environ["WORLD_SIZE"])

    args.distributed = args.world_size > 1 or args.multiprocessing_distributed

    ngpus_per_node = torch.cuda.device_count()
    if args.multiprocessing_distributed:
        # Since we have ngpus_per_node processes per node, the total world_size
        # needs to be adjusted accordingly
        args.world_size = ngpus_per_node * args.world_size
        # Use torch.multiprocessing.spawn to launch distributed processes: the
        # main_worker process function
        mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
    else:
        # Simply call main_worker function
        main_worker(args.gpu, ngpus_per_node, args)
class Net(nn.Module):
    def __init__(self, input_dim, hidden_dim, output_dim, hidden_layers):
        super(Net, self).__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim
        self.hidden_layers = hidden_layers
        self.lstm = nn.LSTM(input_dim, hidden_dim, hidden_layers, batch_first=True)
        self.h2o = nn.Linear(hidden_dim, output_dim)

    def forward(self, x, y):
        self.lstm.flatten_parameters()
        h_t, _ = self.lstm(x)
        output = self.h2o(h_t)
        loss = F.mse_loss(output, y)
        return loss
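
# Note: forward() takes the targets and returns the loss rather than the raw
# predictions. With nn.DataParallel this keeps the full output tensor on each
# replica; only the small per-replica loss tensors get gathered onto the
# default device, where train() below reduces them with loss.mean().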
def main_worker(gpu, ngpus_per_node, args):
    global best_acc1
    args.gpu = gpu

    if args.gpu is not None:
        print("Use GPU: {} for training".format(args.gpu))

    if args.distributed:
        if args.dist_url == "env://" and args.rank == -1:
            args.rank = int(os.environ["RANK"])
        if args.multiprocessing_distributed:
            # For multiprocessing distributed training, rank needs to be the
            # global rank among all the processes
            args.rank = args.rank * ngpus_per_node + gpu
        dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                                world_size=args.world_size, rank=args.rank)

    # create model
    if args.pretrained:
        print("=> using pre-trained model '{}'".format(args.arch))
        model = models.__dict__[args.arch](pretrained=True)
    else:
        model = Net(10, 10, 10, 10)

    if args.distributed:
        print('using DDP')
        # For multiprocessing distributed, DistributedDataParallel constructor
        # should always set the single device scope, otherwise,
        # DistributedDataParallel will use all available devices.
        if args.gpu is not None:
            torch.cuda.set_device(args.gpu)
            model.cuda(args.gpu)
            # When using a single GPU per process and per
            # DistributedDataParallel, we need to divide the batch size
            # ourselves based on the total number of GPUs we have
            args.batch_size = int(args.batch_size / ngpus_per_node)
            args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node)
            model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        else:
            model.cuda()
            # DistributedDataParallel will divide and allocate batch_size to all
            # available GPUs if device_ids are not set
            model = torch.nn.parallel.DistributedDataParallel(model)
    elif args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
    else:
        print('using dataparallel')
        # DataParallel will divide and allocate batch_size to all available GPUs
        if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
            model.features = torch.nn.DataParallel(model.features)
            model.cuda()
        else:
            model = torch.nn.DataParallel(model).cuda()

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda(args.gpu)

    optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    cudnn.benchmark = True

    # Data loading code
    train_dataset = TensorDataset(
        torch.randn(256, 10, 10),
        torch.randn(256, 10, 10))

    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    else:
        train_sampler = None

    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
        num_workers=args.workers, pin_memory=True, sampler=train_sampler)
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            train_sampler.set_epoch(epoch)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch, args)
def train(train_loader, model, criterion, optimizer, epoch, args):
    # switch to train mode
    model.train()

    for i, (images, target) in enumerate(train_loader):
        if args.gpu is not None:
            images = images.cuda(args.gpu, non_blocking=True)
            target = target.cuda(args.gpu, non_blocking=True)

        # compute loss (the model's forward returns the loss directly)
        loss = model(images, target)
        #loss = criterion(output, target)

        # compute gradient and do SGD step; mean() reduces the vector of
        # per-replica losses under DataParallel and is a no-op on a scalar
        optimizer.zero_grad()
        loss.mean().backward()
        optimizer.step()
if __name__ == '__main__':
    main()
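
# Example launch commands (a sketch; the script name "main.py" and the data
# path are placeholders -- the positional DIR argument is still required by
# argparse even though this gist trains on random tensors):
#
#   # single GPU
#   python main.py --gpu 0 /path/to/data
#
#   # all GPUs on one node via nn.DataParallel (the default code path)
#   python main.py /path/to/data
#
#   # all GPUs on one node via DistributedDataParallel
#   python main.py --multiprocessing-distributed --world-size 1 --rank 0 \
#       --dist-url 'tcp://127.0.0.1:23456' /path/to/data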