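The snippet below assumes that model, loss_fn, optimizer, trainer, train_loader and val_loader were all defined earlier in the gist. For orientation only, here is a minimal, hypothetical sketch of how such loaders are often built with torchvision; the dataset paths, image size, and batch size are assumptions, not taken from the gist (the PIL EXIF warning in the log below does suggest an image dataset):

import torch
from torchvision import datasets, transforms

# Hypothetical sketch: the gist's actual data pipeline is not shown here.
transform = transforms.Compose([
    transforms.Resize((224, 224)),  # assumed input size
    transforms.ToTensor(),
])

train_ds = datasets.ImageFolder('data/train', transform=transform)  # assumed path
val_ds = datasets.ImageFolder('data/val', transform=transform)      # assumed path

train_loader = torch.utils.data.DataLoader(train_ds, batch_size=32, shuffle=True)  # assumed batch size
val_loader = torch.utils.data.DataLoader(val_ds, batch_size=32, shuffle=False)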
import torch
import matplotlib.pyplot as plt

# model, loss_fn, optimizer, trainer, train_loader and val_loader
# are defined earlier in the gist.
loader = {'train': train_loader, 'val': val_loader}
epochs = 5
log_interval = 2

# Let's train the model for 5 epochs!
train_losses, val_losses, batch_train_losses, batch_val_losses = trainer(
    loader, model, loss_fn, optimizer, epochs=epochs, log_interval=log_interval)

# Plotting the epoch losses
plt.plot(train_losses)
plt.plot(val_losses)
plt.legend(['train losses', 'val losses'])
plt.title('Loss vs Epoch')

# Plotting the per-batch losses
plt.figure()
plt.plot(batch_train_losses)
plt.title('batch_train_losses')

plt.figure()
plt.plot(batch_val_losses)
plt.title('batch_val_losses')
plt.show()

# Saving the model (architecture and weights)
torch.save(model, 'stage1.pth')
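Because torch.save was given the whole model object rather than just its state_dict, the checkpoint can later be reloaded in a single call. A minimal sketch of reloading it for inference:

import torch

model = torch.load('stage1.pth', map_location='cpu')  # returns the full model object
model.eval()  # disable dropout and batch-norm updates before inference

Note that saving model.state_dict() instead is generally more portable, since a pickled full model is tied to the exact class definition and module paths that existed when it was saved.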
Training log:

Training started...
epoch >>> 1/5
___TRAINING___
batch_loss at batch_idx 01/16: 1.8830947875976562
batch_loss at batch_idx 03/16: 1.7403203248977661
batch_loss at batch_idx 05/16: 1.7298262119293213
batch_loss at batch_idx 07/16: 1.5611944198608398
/opt/conda/lib/python3.6/site-packages/PIL/TiffImagePlugin.py:780: UserWarning: Corrupt EXIF data. Expecting to read 4 bytes but only got 0.
  warnings.warn(str(msg))
batch_loss at batch_idx 09/16: 1.5026812553405762
batch_loss at batch_idx 11/16: 1.3258850574493408
batch_loss at batch_idx 13/16: 1.2495317459106445
batch_loss at batch_idx 15/16: 1.305968165397644
>>> train loss at epoch 1/5: 1.5462412377120007
___VALIDATION___
batch_loss at batch_idx 01/16: 1.1894911527633667
batch_loss at batch_idx 03/16: 1.152886152267456
>>> val loss at epoch 1/5: 1.1689875689078504
=========================
epoch >>> 2/5
___TRAINING___
batch_loss at batch_idx 01/16: 1.0216645002365112
batch_loss at batch_idx 03/16: 1.0510553121566772
batch_loss at batch_idx 05/16: 0.9224186539649963
batch_loss at batch_idx 07/16: 0.8934431672096252
batch_loss at batch_idx 09/16: 0.8022943735122681
batch_loss at batch_idx 11/16: 0.8075667023658752
batch_loss at batch_idx 13/16: 0.7715064287185669
batch_loss at batch_idx 15/16: 0.8646692633628845
>>> train loss at epoch 2/5: 0.8703589484154471
___VALIDATION___
batch_loss at batch_idx 01/16: 0.6990357637405396
batch_loss at batch_idx 03/16: 0.6862280368804932
>>> val loss at epoch 2/5: 0.7019397386415737
=========================
epoch >>> 3/5
___TRAINING___
batch_loss at batch_idx 01/16: 0.6628543138504028
batch_loss at batch_idx 03/16: 0.5495110750198364
batch_loss at batch_idx 05/16: 0.4737720787525177
batch_loss at batch_idx 07/16: 0.5540937781333923
batch_loss at batch_idx 09/16: 0.5418666005134583
batch_loss at batch_idx 11/16: 0.483386754989624
batch_loss at batch_idx 13/16: 0.4547680914402008
batch_loss at batch_idx 15/16: 0.4926633834838867
>>> train loss at epoch 3/5: 0.5295033304116471
___VALIDATION___
batch_loss at batch_idx 01/16: 0.4764443635940552
batch_loss at batch_idx 03/16: 0.45040857791900635
>>> val loss at epoch 3/5: 0.4676538405455942
=========================
epoch >>> 4/5
___TRAINING___
batch_loss at batch_idx 01/16: 0.3784201443195343
batch_loss at batch_idx 03/16: 0.35650306940078735
batch_loss at batch_idx 05/16: 0.40147092938423157
batch_loss at batch_idx 07/16: 0.3184959292411804
batch_loss at batch_idx 09/16: 0.32096436619758606
batch_loss at batch_idx 11/16: 0.3386695683002472
batch_loss at batch_idx 13/16: 0.32678791880607605
batch_loss at batch_idx 15/16: 0.4038775861263275
>>> train loss at epoch 4/5: 0.3578496524703361
___VALIDATION___
batch_loss at batch_idx 01/16: 0.3420213460922241
batch_loss at batch_idx 03/16: 0.33064761757850647
>>> val loss at epoch 4/5: 0.3362177138722788
=========================
epoch >>> 5/5
___TRAINING___
batch_loss at batch_idx 01/16: 0.3416098356246948
batch_loss at batch_idx 03/16: 0.25589874386787415
batch_loss at batch_idx 05/16: 0.2259582132101059
batch_loss at batch_idx 07/16: 0.27338215708732605
batch_loss at batch_idx 09/16: 0.2513640224933624
batch_loss at batch_idx 11/16: 0.25437164306640625
batch_loss at batch_idx 13/16: 0.22256909310817719
batch_loss at batch_idx 15/16: 0.2105967402458191
>>> train loss at epoch 5/5: 0.2632993612836001
___VALIDATION___
batch_loss at batch_idx 01/16: 0.24774116277694702
batch_loss at batch_idx 03/16: 0.24713973701000214
>>> val loss at epoch 5/5: 0.258364795347837
=========================
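The trainer function itself is not shown in this section. For reference, here is a minimal sketch that is consistent with the call signature and the log format above (a train/val phase loop per epoch, per-batch logging every log_interval batches, and four returned loss lists); the body is an assumption, not the gist's actual implementation:

import torch

def trainer(loader, model, loss_fn, optimizer, epochs=5, log_interval=2):
    # Sketch only: matches the call signature and log format, not the gist's code.
    train_losses, val_losses = [], []
    batch_train_losses, batch_val_losses = [], []

    print('Training started...')
    for epoch in range(epochs):
        print('epoch >>> {}/{}'.format(epoch + 1, epochs))
        for phase in ('train', 'val'):
            print('___TRAINING___' if phase == 'train' else '___VALIDATION___')
            if phase == 'train':
                model.train()
            else:
                model.eval()

            running_loss = 0.0
            n_batches = len(loader[phase])
            for batch_idx, (x, y) in enumerate(loader[phase]):
                # Gradients are only tracked during the training phase.
                with torch.set_grad_enabled(phase == 'train'):
                    loss = loss_fn(model(x), y)
                    if phase == 'train':
                        optimizer.zero_grad()
                        loss.backward()
                        optimizer.step()

                running_loss += loss.item()
                (batch_train_losses if phase == 'train' else batch_val_losses).append(loss.item())
                if batch_idx % log_interval == 1:  # odd indices, matching the log
                    print('batch_loss at batch_idx {:02d}/{}: {}'.format(batch_idx, n_batches, loss.item()))

            epoch_loss = running_loss / n_batches
            if phase == 'train':
                train_losses.append(epoch_loss)
            else:
                val_losses.append(epoch_loss)
            print('>>> {} loss at epoch {}/{}: {}'.format(phase, epoch + 1, epochs, epoch_loss))
        print('=' * 25)

    return train_losses, val_losses, batch_train_losses, batch_val_losses

One detail the sketch does not reproduce: the original log prints /16 on the validation lines as well, which suggests the printed denominator there was the train loader's batch count rather than each phase's own.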