models/dcgan.py
import torch
import torch.nn as nn
import torch.nn.parallel
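# DCGAN-style generator and critic used by the WGAN training script.
# The *_nobn variants below omit BatchNorm (selected with --noBN).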
class DCGAN_D(nn.Module):
    def __init__(self, isize, nz, nc, ndf, ngpu, n_extra_layers=0):
        super(DCGAN_D, self).__init__()
        self.ngpu = ngpu
        # in practice isize should be 4 * 2**k (16, 32, 64, ...) so the
        # stride-2 convolutions land exactly on a 4x4 feature map
        assert isize % 16 == 0, "isize has to be a multiple of 16"

        main = nn.Sequential(
            # input is nc x isize x isize
            nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
        )
        i, csize, cndf = 2, isize // 2, ndf

        # Extra layers
        for t in range(n_extra_layers):
            main.add_module(str(i),
                            nn.Conv2d(cndf, cndf, 3, 1, 1, bias=False))
            main.add_module(str(i + 1),
                            nn.BatchNorm2d(cndf))
            main.add_module(str(i + 2),
                            nn.LeakyReLU(0.2, inplace=True))
            i += 3

        while csize > 4:
            in_feat = cndf
            out_feat = cndf * 2
            main.add_module(str(i),
                            nn.Conv2d(in_feat, out_feat, 4, 2, 1, bias=False))
            main.add_module(str(i + 1),
                            nn.BatchNorm2d(out_feat))
            main.add_module(str(i + 2),
                            nn.LeakyReLU(0.2, inplace=True))
            i += 3
            cndf = cndf * 2
            csize = csize // 2

        # state size. K x 4 x 4
        main.add_module(str(i),
                        nn.Conv2d(cndf, 1, 4, 1, 0, bias=False))
        self.main = main

    def forward(self, input):
        if isinstance(input.data, torch.cuda.FloatTensor) and self.ngpu > 1:
            output = nn.parallel.data_parallel(self.main, input, range(self.ngpu))
        else:
            output = self.main(input)
        output = output.mean(0)
        return output.view(1)  # mean critic score over the batch
class DCGAN_G(nn.Module):
    def __init__(self, isize, nz, nc, ngf, ngpu, n_extra_layers=0):
        super(DCGAN_G, self).__init__()
        self.ngpu = ngpu
        assert isize % 16 == 0, "isize has to be a multiple of 16"

        # find the widest layer so that upsampling from 4x4 reaches isize
        cngf, tisize = ngf // 2, 4
        while tisize != isize:
            cngf = cngf * 2
            tisize = tisize * 2

        main = nn.Sequential(
            # input is Z, going into a convolution
            nn.ConvTranspose2d(nz, cngf, 4, 1, 0, bias=False),
            nn.BatchNorm2d(cngf),
            nn.ReLU(True),
        )
        i, csize = 3, 4

        while csize < isize // 2:
            main.add_module(str(i),
                            nn.ConvTranspose2d(cngf, cngf // 2, 4, 2, 1, bias=False))
            main.add_module(str(i + 1),
                            nn.BatchNorm2d(cngf // 2))
            main.add_module(str(i + 2),
                            nn.ReLU(True))
            i += 3
            cngf = cngf // 2
            csize = csize * 2

        # Extra layers
        for t in range(n_extra_layers):
            main.add_module(str(i),
                            nn.Conv2d(cngf, cngf, 3, 1, 1, bias=False))
            main.add_module(str(i + 1),
                            nn.BatchNorm2d(cngf))
            main.add_module(str(i + 2),
                            nn.ReLU(True))
            i += 3

        main.add_module(str(i),
                        nn.ConvTranspose2d(cngf, nc, 4, 2, 1, bias=False))
        main.add_module(str(i + 1), nn.Tanh())
        self.main = main

    def forward(self, input):
        if isinstance(input.data, torch.cuda.FloatTensor) and self.ngpu > 1:
            output = nn.parallel.data_parallel(self.main, input, range(self.ngpu))
        else:
            output = self.main(input)
        return output
###############################################################################
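# Variants of the same critic/generator without BatchNorm,
# selected in the training script with --noBN.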
class DCGAN_D_nobn(nn.Module):
    def __init__(self, isize, nz, nc, ndf, ngpu, n_extra_layers=0):
        super(DCGAN_D_nobn, self).__init__()
        self.ngpu = ngpu
        assert isize % 16 == 0, "isize has to be a multiple of 16"

        main = nn.Sequential(
            # input is nc x isize x isize
            nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
        )
        i, csize, cndf = 2, isize // 2, ndf

        # Extra layers
        for t in range(n_extra_layers):
            main.add_module(str(i),
                            nn.Conv2d(cndf, cndf, 3, 1, 1, bias=False))
            main.add_module(str(i + 1),
                            nn.LeakyReLU(0.2, inplace=True))
            i += 2

        while csize > 4:
            in_feat = cndf
            out_feat = cndf * 2
            main.add_module(str(i),
                            nn.Conv2d(in_feat, out_feat, 4, 2, 1, bias=False))
            main.add_module(str(i + 1),
                            nn.LeakyReLU(0.2, inplace=True))
            i += 2
            cndf = cndf * 2
            csize = csize // 2

        # state size. K x 4 x 4
        main.add_module(str(i),
                        nn.Conv2d(cndf, 1, 4, 1, 0, bias=False))
        self.main = main

    def forward(self, input):
        if isinstance(input.data, torch.cuda.FloatTensor) and self.ngpu > 1:
            output = nn.parallel.data_parallel(self.main, input, range(self.ngpu))
        else:
            output = self.main(input)
        output = output.mean(0)
        return output.view(1)
class DCGAN_G_nobn(nn.Module):
    def __init__(self, isize, nz, nc, ngf, ngpu, n_extra_layers=0):
        super(DCGAN_G_nobn, self).__init__()
        self.ngpu = ngpu
        assert isize % 16 == 0, "isize has to be a multiple of 16"

        cngf, tisize = ngf // 2, 4
        while tisize != isize:
            cngf = cngf * 2
            tisize = tisize * 2

        main = nn.Sequential(
            # input is Z, going into a convolution
            nn.ConvTranspose2d(nz, cngf, 4, 1, 0, bias=False),
            nn.ReLU(True),
        )
        i, csize = 3, 4

        while csize < isize // 2:
            main.add_module(str(i),
                            nn.ConvTranspose2d(cngf, cngf // 2, 4, 2, 1, bias=False))
            main.add_module(str(i + 1),
                            nn.ReLU(True))
            i += 2
            cngf = cngf // 2
            csize = csize * 2

        # Extra layers
        for t in range(n_extra_layers):
            main.add_module(str(i),
                            nn.Conv2d(cngf, cngf, 3, 1, 1, bias=False))
            main.add_module(str(i + 1),
                            nn.ReLU(True))
            i += 2

        main.add_module(str(i),
                        nn.ConvTranspose2d(cngf, nc, 4, 2, 1, bias=False))
        main.add_module(str(i + 1), nn.Tanh())
        self.main = main

    def forward(self, input):
        if isinstance(input.data, torch.cuda.FloatTensor) and self.ngpu > 1:
            output = nn.parallel.data_parallel(self.main, input, range(self.ngpu))
        else:
            output = self.main(input)
        return output
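As a quick sanity check of the classes above, here is a minimal sketch (not part of the original file); it assumes the default sizes used by the training script below, and that this file is importable as models.dcgan, which is how the script imports it:

import torch
from torch.autograd import Variable
from models.dcgan import DCGAN_G, DCGAN_D

# hypothetical sizes, matching the training script's defaults
isize, nz, nc, ngf, ndf, ngpu = 64, 100, 3, 64, 64, 1
netG = DCGAN_G(isize, nz, nc, ngf, ngpu)
netD = DCGAN_D(isize, nz, nc, ndf, ngpu)

z = Variable(torch.randn(8, nz, 1, 1))
fake = netG(z)      # 8 x nc x isize x isize images in [-1, 1] (Tanh output)
score = netD(fake)  # single critic score averaged over the batch, shape (1,)
print(fake.size(), score.size())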
main.py (the WGAN training script)
from __future__ import print_function
import argparse
import random
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
from torch.autograd import Variable
import os

import models.dcgan as dcgan
import models.mlp as mlp
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', required=True, help='cifar10 | lsun | imagenet | folder | lfw')
parser.add_argument('--dataroot', required=True, help='path to dataset')
parser.add_argument('--workers', type=int, help='number of data loading workers', default=2)
parser.add_argument('--batchSize', type=int, default=64, help='input batch size')
parser.add_argument('--imageSize', type=int, default=64, help='the height / width of the input image to network')
parser.add_argument('--nz', type=int, default=100, help='size of the latent z vector')
parser.add_argument('--ngf', type=int, default=64)
parser.add_argument('--ndf', type=int, default=64)
parser.add_argument('--niter', type=int, default=25, help='number of epochs to train for')
parser.add_argument('--lrD', type=float, default=0.00005, help='learning rate for Critic, default=0.00005')
parser.add_argument('--lrG', type=float, default=0.00005, help='learning rate for Generator, default=0.00005')
parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')
parser.add_argument('--cuda', action='store_true', help='enables cuda')
parser.add_argument('--ngpu', type=int, default=1, help='number of GPUs to use')
parser.add_argument('--netG', default='', help='path to netG (to continue training)')
parser.add_argument('--netD', default='', help='path to netD (to continue training)')
parser.add_argument('--clamp_lower', type=float, default=-0.01)
parser.add_argument('--clamp_upper', type=float, default=0.01)
parser.add_argument('--Diters', type=int, default=5, help='number of D iters per each G iter')
parser.add_argument('--noBN', action='store_true', help='use batchnorm or not (only for DCGAN)')
parser.add_argument('--mlp_G', action='store_true', help='use MLP for G')
parser.add_argument('--mlp_D', action='store_true', help='use MLP for D')
parser.add_argument('--grad_bound', type=float, default=1e10, help='keep training the critic until the norm of its gradient is below this')
parser.add_argument('--n_extra_layers', type=int, default=0, help='number of extra layers on gen and disc')
parser.add_argument('--experiment', default=None, help='where to store samples and models')
parser.add_argument('--adam', action='store_true', help='whether to use adam (default is rmsprop)')
opt = parser.parse_args()
print(opt)
if opt.experiment is None:
    opt.experiment = 'samples'
if not os.path.exists(opt.experiment):  # portable replacement for os.system('mkdir ...')
    os.makedirs(opt.experiment)

opt.manualSeed = random.randint(1, 10000)  # fix seed
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)

cudnn.benchmark = True

if torch.cuda.is_available() and not opt.cuda:
    print("WARNING: You have a CUDA device, so you should probably run with --cuda")
if opt.dataset in ['imagenet', 'folder', 'lfw']:
    # folder dataset
    dataset = dset.ImageFolder(root=opt.dataroot,
                               transform=transforms.Compose([
                                   # transforms.Scale was later renamed transforms.Resize
                                   transforms.Scale(opt.imageSize),
                                   transforms.CenterCrop(opt.imageSize),
                                   transforms.ToTensor(),
                                   transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
                               ]))
elif opt.dataset == 'lsun':
    dataset = dset.LSUN(db_path=opt.dataroot, classes=['bedroom_train'],
                        transform=transforms.Compose([
                            transforms.Scale(opt.imageSize),
                            transforms.CenterCrop(opt.imageSize),
                            transforms.ToTensor(),
                            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
                        ]))
elif opt.dataset == 'cifar10':
    dataset = dset.CIFAR10(root=opt.dataroot, download=True,
                           transform=transforms.Compose([
                               transforms.Scale(opt.imageSize),
                               transforms.ToTensor(),
                               transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
                           ]))
assert dataset
dataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batchSize,
                                         shuffle=True, num_workers=int(opt.workers))

ngpu = int(opt.ngpu)
nz = int(opt.nz)
ngf = int(opt.ngf)
ndf = int(opt.ndf)
nc = 3
n_extra_layers = int(opt.n_extra_layers)
# custom weights initialization called on netG and netD
# (DCGAN-style init: conv weights ~ N(0, 0.02), batchnorm scales ~ N(1, 0.02))
def weights_init(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        m.weight.data.normal_(0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)
if opt.noBN:
    netG = dcgan.DCGAN_G_nobn(opt.imageSize, nz, nc, ngf, ngpu, n_extra_layers)
elif opt.mlp_G:
    netG = mlp.MLP_G(opt.imageSize, nz, nc, ngf, ngpu)
else:
    netG = dcgan.DCGAN_G(opt.imageSize, nz, nc, ngf, ngpu, n_extra_layers)

netG.apply(weights_init)
if opt.netG != '':  # load checkpoint if needed
    netG.load_state_dict(torch.load(opt.netG))
print(netG)

if opt.noBN:
    netD = dcgan.DCGAN_D_nobn(opt.imageSize, nz, nc, ndf, ngpu, n_extra_layers)
    netD.apply(weights_init)
elif opt.mlp_D:
    netD = mlp.MLP_D(opt.imageSize, nz, nc, ndf, ngpu)
else:
    netD = dcgan.DCGAN_D(opt.imageSize, nz, nc, ndf, ngpu, n_extra_layers)
    netD.apply(weights_init)

if opt.netD != '':
    netD.load_state_dict(torch.load(opt.netD))
print(netD)
input = torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize)
noise = torch.FloatTensor(opt.batchSize, nz, 1, 1)
fixed_noise = torch.FloatTensor(opt.batchSize, nz, 1, 1).normal_(0, 1)
one = torch.FloatTensor([1])
mone = one * -1

if opt.cuda:
    netD.cuda()
    netG.cuda()
    input = input.cuda()
    one, mone = one.cuda(), mone.cuda()
    noise, fixed_noise = noise.cuda(), fixed_noise.cuda()

input = Variable(input)
noise = Variable(noise)
fixed_noise = Variable(fixed_noise)

# setup optimizer
if opt.adam:
    optimizerD = optim.Adam(netD.parameters(), lr=opt.lrD, betas=(opt.beta1, 0.999))
    optimizerG = optim.Adam(netG.parameters(), lr=opt.lrG, betas=(opt.beta1, 0.999))
else:
    optimizerD = optim.RMSprop(netD.parameters(), lr=opt.lrD)
    optimizerG = optim.RMSprop(netG.parameters(), lr=opt.lrG)
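# Note: the WGAN paper reports that momentum-based optimizers such as Adam
# can destabilize critic training, hence RMSprop as the default and --adam
# as an opt-in flag.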
gen_iterations = 0
for epoch in range(opt.niter):
    data_iter = iter(dataloader)
    i = 0
    while i < len(dataloader):
        ############################
        # (1) Update D network
        ############################
        for p in netD.parameters():  # reset requires_grad
            p.requires_grad = True   # they are set to False below in netG update

        # train the critic Diters times; much more often at the start and
        # every 500 generator iterations, as in the WGAN reference code
        if gen_iterations < 25 or gen_iterations % 500 == 0:
            Diters = 100
        else:
            Diters = opt.Diters
        grad_D_norm = 0
        j = 0
        while (j < Diters or grad_D_norm > opt.grad_bound) and i < len(dataloader):
            j += 1

            # clamp parameters to a cube (crude Lipschitz constraint)
            for p in netD.parameters():
                p.data.clamp_(opt.clamp_lower, opt.clamp_upper)

            data = next(data_iter)
            i += 1

            # train with real
            real_cpu, _ = data
            netD.zero_grad()
            batch_size = real_cpu.size(0)
            input.data.resize_(real_cpu.size()).copy_(real_cpu)

            errD_real = netD(input)
            errD_real.backward(one)

            # train with fake
            noise.data.resize_(batch_size, nz, 1, 1)
            noise.data.normal_(0, 1)
            fake = netG(noise)
            input.data.copy_(fake.data)
            errD_fake = netD(input)
            errD_fake.backward(mone)
            errD = errD_real - errD_fake
            optimizerD.step()

            # update the gradient norm used in the loop condition above so
            # that --grad_bound actually has an effect (the original gist
            # initializes grad_D_norm but never recomputes it)
            grad_D_norm = sum(p.grad.data.norm() for p in netD.parameters()
                              if p.grad is not None)

        ############################
        # (2) Update G network
        ############################
        for p in netD.parameters():
            p.requires_grad = False  # to avoid computation

        netG.zero_grad()
        noise.data.normal_(0, 1)
        fake = netG(noise)
        errG = netD(fake)
        errG.backward(one)
        optimizerG.step()
        gen_iterations += 1

        print('[%d/%d][%d/%d] Loss_D: %f Loss_G: %f Loss_D_real: %f Loss_D_fake: %f'
              % (epoch, opt.niter, gen_iterations, len(dataloader),
                 errD.data[0], errG.data[0], errD_real.data[0], errD_fake.data[0]))
        if gen_iterations % 500 == 0:
            vutils.save_image(real_cpu, '{0}/real_samples.png'.format(opt.experiment))
            fake = netG(fixed_noise)
            vutils.save_image(fake.data, '{0}/fake_samples_{1}.png'.format(opt.experiment, gen_iterations))

    # do checkpointing
    torch.save(netG.state_dict(), '{0}/netG_epoch_{1}.pth'.format(opt.experiment, epoch))
    torch.save(netD.state_dict(), '{0}/netD_epoch_{1}.pth'.format(opt.experiment, epoch))
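For orientation: the two opposite-signed backward calls (one / mone) implement the two terms of the WGAN critic objective from Arjovsky et al., so errD = errD_real - errD_fake is, up to sign, the critic's running estimate of the earth-mover distance between real and generated batches, and the per-parameter clamp to [clamp_lower, clamp_upper] is the paper's blunt way of keeping the critic approximately 1-Lipschitz. Assuming the script is saved as main.py next to the models/ package, a typical run (all flags are defined above; the data path is a placeholder) would be: python main.py --dataset cifar10 --dataroot ./data --cuda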