Skip to content

Instantly share code, notes, and snippets.

@dvgodoy
Last active April 30, 2022 08:50
Show Gist options
  • Save dvgodoy/8383c09215cf07564af7cc3d0c6703c2 to your computer and use it in GitHub Desktop.
# Train `model_ae` (an autoencoder, judging by the loss target being the
# input itself) on batches from `circles_dl`, minimizing MSE reconstruction
# loss, and record the mean loss per epoch in `train_losses`.
# NOTE(review): `set_seed`, `model_ae` and `circles_dl` are defined elsewhere.
set_seed(13)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model_ae.to(device)

loss_fn = nn.MSELoss()
optim = torch.optim.Adam(model_ae.parameters(), 0.0003)

num_epochs = 10
train_losses = []  # one 1-element array (mean batch loss) per epoch

# train() only toggles module mode and is idempotent, so it is hoisted out
# of the batch loop instead of being called once per batch.
model_ae.train()
for epoch in range(1, num_epochs + 1):
    batch_losses = []
    for x, _ in circles_dl:  # labels are ignored: the target is x itself
        x = x.to(device)
        # Step 1 - Computes our model's predicted output - forward pass
        yhat = model_ae(x)
        # Step 2 - Computes the loss (reconstruction error against the input)
        loss = loss_fn(yhat, x)
        # Step 3 - Computes gradients
        loss.backward()
        # Step 4 - Updates parameters using gradients and the learning rate
        optim.step()
        optim.zero_grad()
        # loss.item() is the supported scalar accessor; `.data` is deprecated
        batch_losses.append(np.array([loss.item()]))
    # Average over batches
    train_losses.append(np.array(batch_losses).mean(axis=0))
    print(f'Epoch {epoch:03d} | Loss >> {train_losses[-1][0]:.4f}')
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment