@jamesmurdza
Created July 25, 2024 13:32
Neural Network in PyTorch
import torch
import torch.nn as nn
import torch.optim as optim

# Define the model class
class CustomModel(nn.Module):
    def __init__(self):
        super(CustomModel, self).__init__()
        # Define layers
        self.fc1 = nn.Linear(18, 10)  # First hidden layer
        self.fc2 = nn.Linear(10, 20)  # Second hidden layer
        self.fc3 = nn.Linear(20, 1)   # Output layer
        # Define activation function
        self.relu = nn.ReLU()

    def forward(self, x):
        x = self.relu(self.fc1(x))  # First hidden layer with ReLU activation
        x = self.relu(self.fc2(x))  # Second hidden layer with ReLU activation
        x = self.fc3(x)             # Output layer
        return x

# Instantiate the model
model2 = CustomModel()

# Define loss function
criterion = nn.MSELoss()

# Define optimizer
optimizer = optim.Adam(model2.parameters())

# Training step
def train_step(x_batch, y_batch):
    model2.train()         # Set the model to training mode
    optimizer.zero_grad()  # Zero the gradients
    # Forward pass
    y_pred = model2(x_batch)
    # Compute loss
    loss = criterion(y_pred, y_batch)
    # Backward pass and optimization
    loss.backward()
    optimizer.step()
    return loss.item()

# Example usage
# x_train and y_train should be your input features and labels as PyTorch tensors
# x_train = torch.tensor(...)
# y_train = torch.tensor(...)
# for epoch in range(epochs):
#     loss = train_step(x_train, y_train)
#     print(f"Epoch {epoch}, Loss: {loss}")
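For completeness, here is a minimal runnable sketch of the commented-out usage above with synthetic data. The tensor shapes (100 samples with 18 features to match the first layer), the epoch count, and the inference step are illustrative assumptions, not part of the original gist.

# Minimal sketch with synthetic data (shapes and epoch count are assumptions)
x_train = torch.randn(100, 18)  # 100 samples, 18 features to match nn.Linear(18, 10)
y_train = torch.randn(100, 1)   # 100 regression targets to match the 1-unit output layer
epochs = 20                     # Illustrative number of epochs

for epoch in range(epochs):
    loss = train_step(x_train, y_train)
    print(f"Epoch {epoch}, Loss: {loss}")

# Inference: switch to eval mode and disable gradient tracking
model2.eval()
with torch.no_grad():
    preds = model2(x_train[:5])
    print(preds)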