import numpy as np
import torch
import torch.nn as nn

# AutoEncoder, encoder_var_cnn, decoder_cnn, and circles_dl are defined elsewhere
device = 'cuda' if torch.cuda.is_available() else 'cpu'

model_vae_cnn = AutoEncoder(encoder_var_cnn, decoder_cnn)
model_vae_cnn.to(device)
loss_fn = nn.MSELoss(reduction='none')
optim = torch.optim.Adam(model_vae_cnn.parameters(), lr=0.0003)
num_epochs = 30
train_losses = []
reconstruction_loss_factor = 1

for epoch in range(1, num_epochs + 1):
    batch_losses = []
    for i, (x, _) in enumerate(circles_dl):
        model_vae_cnn.train()
        x = x.to(device)
        # Step 1 - Computes our model's predicted output - forward pass
        yhat = model_vae_cnn(x)
        # Step 2 - Computes the loss: per-pixel squared errors summed over
        # channel, height, and width, then over the mini-batch
        loss = loss_fn(yhat, x).sum(dim=[1, 2, 3]).sum(dim=0)
        # KL divergence summed over latent dimensions, then over the mini-batch
        kl_loss = model_vae_cnn.enc.kl_loss().sum(dim=1).sum(dim=0)
        total_loss = reconstruction_loss_factor * loss + kl_loss
        # Step 3 - Computes gradients
        total_loss.backward()
        # Step 4 - Updates parameters using gradients and the learning rate
        optim.step()
        optim.zero_grad()
        batch_losses.append(np.array([total_loss.item(),
                                      loss.item(),
                                      kl_loss.item()]))
    # Average over batches
    train_losses.append(np.array(batch_losses).mean(axis=0))
    print(f'Epoch {epoch:03d} | Loss >> {train_losses[-1][0]:.4f}/'
          f'{train_losses[-1][1]:.4f}/{train_losses[-1][2]:.4f}')
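The loop calls model_vae_cnn.enc.kl_loss() and expects a tensor of shape (batch, latent_dim), which it sums over the latent dimensions and then over the batch. For reference, here is a minimal sketch of a variational encoder consistent with that usage. The class name, attribute names, and constructor arguments are assumptions for illustration, not the gist's actual implementation; only the closed-form KL divergence between N(mu, sigma^2) and the standard normal is fixed by the math.

import torch
import torch.nn as nn

class EncoderVar(nn.Module):
    # Hypothetical variational encoder; names and structure are assumptions
    def __init__(self, base_encoder, feature_dim, latent_dim):
        super().__init__()
        self.base = base_encoder  # CNN that maps images to flat features
        self.lin_mu = nn.Linear(feature_dim, latent_dim)
        self.lin_log_var = nn.Linear(feature_dim, latent_dim)
        self.mu = None
        self.log_var = None

    def forward(self, x):
        features = self.base(x)
        # Keep mu and log_var from the latest forward pass so kl_loss()
        # can be called afterwards, as the training loop above does
        self.mu = self.lin_mu(features)
        self.log_var = self.lin_log_var(features)
        # Reparameterization trick: z = mu + sigma * epsilon
        eps = torch.randn_like(self.mu)
        return self.mu + (0.5 * self.log_var).exp() * eps

    def kl_loss(self):
        # Element-wise KL(N(mu, sigma^2) || N(0, 1)); shape (batch, latent_dim),
        # left unsummed so the caller can reduce it as it sees fit
        return -0.5 * (1 + self.log_var - self.mu ** 2 - self.log_var.exp())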