#%%capture
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from IPython.display import HTML

# Animate the generator samples stored in img_list during training
fig = plt.figure(figsize=(8, 8))
plt.axis("off")
ims = [[plt.imshow(np.transpose(i, (1, 2, 0)), animated=True)] for i in img_list]
ani = animation.ArtistAnimation(fig, ims, interval=1000, repeat_delay=1000, blit=True)
ani.save("anim.mp4")  # saving to mp4 needs an ffmpeg writer on the system
HTML(ani.to_jshtml())
plt.figure(figsize=(10,5))
plt.title("Generator and Discriminator Loss During Training")
plt.plot(G_losses,label="G")
plt.plot(D_losses,label="D")
plt.xlabel("iterations")
plt.ylabel("Loss")
plt.legend()
plt.show()
# Lists to keep track of progress
img_list = []
G_losses = []
D_losses = []
iters = 0

for epoch in range(num_epochs):
    for i, data in enumerate(dataloader, 0):
        # Part 1: update the discriminator network
        netD.zero_grad()
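The preview cuts the loop body off after its first line. A minimal sketch of the rest of a standard DCGAN training step, following the usual PyTorch DCGAN recipe rather than this gist's exact code (the 500-iteration snapshot interval and the use of fixed_noise/vutils for snapshots are assumptions):

        # Train the discriminator on an all-real batch
        real_cpu = data[0].to(device)
        b_size = real_cpu.size(0)
        label = torch.full((b_size,), real_label, dtype=torch.float, device=device)
        output = netD(real_cpu).view(-1)
        errD_real = criterion(output, label)
        errD_real.backward()

        # Train the discriminator on an all-fake batch
        noise = torch.randn(b_size, nz, 1, 1, device=device)
        fake = netG(noise)
        label.fill_(fake_label)
        output = netD(fake.detach()).view(-1)
        errD_fake = criterion(output, label)
        errD_fake.backward()
        errD = errD_real + errD_fake
        optimizerD.step()

        # Part 2: update the generator (fake images labelled "real" for its loss)
        netG.zero_grad()
        label.fill_(real_label)
        output = netD(fake).view(-1)
        errG = criterion(output, label)
        errG.backward()
        optimizerG.step()

        # Book-keeping for the loss plot and the animation above
        G_losses.append(errG.item())
        D_losses.append(errD.item())
        if (iters % 500 == 0) or ((epoch == num_epochs - 1) and (i == len(dataloader) - 1)):
            with torch.no_grad():
                fake = netG(fixed_noise).detach().cpu()
            img_list.append(vutils.make_grid(fake, padding=2, normalize=True))
        iters += 1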
import torch
import torch.nn as nn
import torch.optim as optim

# BCE loss, fixed noise for visualising generator progress, label conventions, and the two Adam optimisers
criterion = nn.BCELoss()
fixed_noise = torch.randn(64, nz, 1, 1, device=device)
real_label = 1.
fake_label = 0.
optimizerD = optim.Adam(netD.parameters(), lr=lr, betas=(beta1, 0.999))
optimizerG = optim.Adam(netG.parameters(), lr=lr, betas=(beta1, 0.999))
netD = Discriminator(ngpu).to(device)

# Handle multi-GPU if desired
if (device.type == 'cuda') and (ngpu > 1):
    netD = nn.DataParallel(netD, list(range(ngpu)))

# Apply the custom weight initialisation
netD.apply(weights_init)
print(netD)
class Discriminator(nn.Module):
    def __init__(self, ngpu):
        super(Discriminator, self).__init__()
        self.ngpu = ngpu
        self.main = nn.Sequential(
            # input is (nc) x 64 x 64
            nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf) x 32 x 32
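The preview truncates the Discriminator after its first conv block. A sketch of how the class typically continues for 64x64 inputs, following the standard ndf-doubling DCGAN pattern (the intermediate widths are assumptions, not necessarily this gist's exact code):

            nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 2),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*2) x 16 x 16
            nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 4),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*4) x 8 x 8
            nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 8),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*8) x 4 x 4
            nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False),
            nn.Sigmoid()
        )

    def forward(self, input):
        return self.main(input)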
netG = Generator(ngpu).to(device)

# Handle multi-gpu if desired
if (device.type == 'cuda') and (ngpu > 1):
    netG = nn.DataParallel(netG, list(range(ngpu)))
netG.apply(weights_init)
print(netG)
class Generator(nn.Module):
    def __init__(self, ngpu):
        super(Generator, self).__init__()
        self.ngpu = ngpu
        self.main = nn.Sequential(
            # input is Z, going into a convolution
            nn.ConvTranspose2d( nz, ngf * 8, 4, 1, 0, bias=False),
            nn.BatchNorm2d(ngf * 8),
            nn.ReLU(True),
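The Generator is likewise cut off after its first block. A sketch of the usual continuation, upsampling from 4x4 to the (nc) x 64 x 64 output (again assuming the standard DCGAN layout rather than this gist's exact code):

            # state size. (ngf*8) x 4 x 4
            nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 4),
            nn.ReLU(True),
            # state size. (ngf*4) x 8 x 8
            nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 2),
            nn.ReLU(True),
            # state size. (ngf*2) x 16 x 16
            nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf),
            nn.ReLU(True),
            # state size. (ngf) x 32 x 32
            nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),
            nn.Tanh()
            # state size. (nc) x 64 x 64
        )

    def forward(self, input):
        return self.main(input)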
def weights_init(m):
    # DCGAN initialisation: conv weights ~ N(0, 0.02), batch-norm scales ~ N(1, 0.02)
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        nn.init.normal_(m.weight.data, 1.0, 0.02)
        nn.init.constant_(m.bias.data, 0)
device = torch.device("cuda")
real_batch = next(iter(dataloader))
plt.figure(figsize=(8, 8))
plt.axis("off")
plt.imshow(
    np.transpose(
        vutils.make_grid(real_batch[0].to(device)[:64],
                         padding=2,
 normalize=True).cpu(), (1, 2,0)))
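None of the snippets define the hyperparameters (nz, ngf, ndf, nc, lr, beta1, num_epochs, ngpu) or the dataloader they rely on. A minimal sketch of the assumed setup, using the standard DCGAN-tutorial defaults and a hypothetical image folder (every value and the dataroot path here are assumptions, not taken from the gist):

import torch
import torchvision.datasets as dset
import torchvision.transforms as transforms

dataroot = "path/to/images"  # hypothetical folder of training images
image_size = 64    # images are resized/cropped to 64x64 to match the networks above
nc = 3             # number of colour channels
nz = 100           # length of the latent vector z
ngf = 64           # generator feature-map base width
ndf = 64           # discriminator feature-map base width
num_epochs = 5     # number of training epochs
lr = 0.0002        # Adam learning rate
beta1 = 0.5        # Adam beta1 momentum term
ngpu = 1           # number of GPUs to use
batch_size = 128   # dataloader batch size

dataset = dset.ImageFolder(root=dataroot,
                           transform=transforms.Compose([
                               transforms.Resize(image_size),
                               transforms.CenterCrop(image_size),
                               transforms.ToTensor(),
                               transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
                           ]))
dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size,
                                         shuffle=True, num_workers=2)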