@michaelfortunato · Created July 31, 2025 01:15
Neural Network Fundamentals
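
A minimal PyTorch training loop: it moves the model to the configured device, optimizes with AdamW against an MSE objective, tracks per-batch loss in a tqdm progress bar, and logs the mean loss at the end of each epoch.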
import numpy as np
import torch.nn.functional as F
from torch.optim import AdamW
from torch.utils.data import DataLoader
from tqdm import tqdm

# MikesNN, ExperimentConfig, Logger, and StdoutLogger are assumed to be
# defined elsewhere in this project. Each batch is assumed to be an object
# exposing .to(device) and a .y target attribute (e.g. a PyG-style Batch).


def train(
    model: MikesNN,
    train_loader: DataLoader,
    config: ExperimentConfig,
    logger: Logger | None = None,
):
    num_epochs = config.num_epochs
    learning_rate = config.learning_rate
    device = config.device
    logger = logger or StdoutLogger()
    model.to(device)
    model.train()
    optimizer = AdamW(model.parameters(), lr=learning_rate)
    criteria = F.mse_loss
    for i in range(num_epochs):
        progress_bar = tqdm(train_loader)
        epoch_losses = []
        for batch_idx, batch in enumerate(progress_bar):
            # .to() is not guaranteed to be in-place; reassign the result
            batch = batch.to(device)
            optimizer.zero_grad()
            loss = criteria(model(batch), batch.y.unsqueeze(1).float())
            loss.backward()
            optimizer.step()
            progress_bar.set_postfix_str(
                f'Loss at {batch_idx}: {loss.item():.4f}'
            )
            epoch_losses.append(loss.item())
        # pyrefly: ignore # no-matching-overload
        epoch_mean_loss = np.mean(np.array(epoch_losses))
        logger.info(f'Loss at epoch {i}: {epoch_mean_loss:.4f}')
        logger.log_dict({'loss': epoch_mean_loss})
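
A minimal usage sketch, assuming hypothetical ExperimentConfig fields and a dataset constructed elsewhere; none of this setup is part of the gist itself:

# Hypothetical invocation; the ExperimentConfig constructor arguments and
# the `dataset` variable are assumptions for illustration only.
config = ExperimentConfig(num_epochs=10, learning_rate=1e-3, device='cuda')
model = MikesNN()
train_loader = DataLoader(dataset, batch_size=32, shuffle=True)
train(model, train_loader, config)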