from torch.utils.data import DataLoader

train_loader = DataLoader(dataset=train_data, batch_size=16, shuffle=True)
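A minimal sketch of how the loader might be consumed; the toy tensors and the TensorDataset standing in for train_data below are assumptions for illustration only:

import torch
from torch.utils.data import DataLoader, TensorDataset

# Hypothetical stand-in for train_data: 80 noisy points on a line
x_train_tensor = torch.randn(80, 1)
y_train_tensor = 1 + 2 * x_train_tensor + 0.1 * torch.randn(80, 1)
train_data = TensorDataset(x_train_tensor, y_train_tensor)

train_loader = DataLoader(dataset=train_data, batch_size=16, shuffle=True)

# Each iteration yields one shuffled mini-batch of 16 (x, y) pairs
for x_batch, y_batch in train_loader:
    print(x_batch.shape, y_batch.shape)  # torch.Size([16, 1]) torch.Size([16, 1])
    break
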
from torch.utils.data import Dataset, TensorDataset

class CustomDataset(Dataset):
    def __init__(self, x_tensor, y_tensor):
        self.x = x_tensor
        self.y = y_tensor

    def __getitem__(self, index):
        return (self.x[index], self.y[index])

    def __len__(self):
        # A DataLoader needs to know how many samples the dataset holds
        return len(self.x)
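A short sketch of how either dataset flavor might be built; x_train_tensor and y_train_tensor are assumed to be CPU tensors defined elsewhere (names carried over from the other snippets):

# Either wrap the tensors with the custom class...
train_data = CustomDataset(x_train_tensor, y_train_tensor)
print(train_data[0])  # a single (x, y) pair

# ...or let the built-in TensorDataset do the same job for plain tensors
train_data = TensorDataset(x_train_tensor, y_train_tensor)
print(train_data[0])
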
import torch.nn as nn

class LayerLinearRegression(nn.Module):
    def __init__(self):
        super().__init__()
        # Instead of our custom parameters, we use a Linear layer with a single input and a single output
        self.linear = nn.Linear(1, 1)

    def forward(self, x):
        # Now it only takes a call to the layer to make predictions
        return self.linear(x)
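A quick sanity check, just to show that the Linear layer registers its own weight and bias as model parameters (the variable name dummy is hypothetical):

import torch

torch.manual_seed(42)
dummy = LayerLinearRegression()
print(dummy.state_dict())
# OrderedDict with 'linear.weight' (shape [1, 1]) and 'linear.bias' (shape [1])
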
torch.manual_seed(42)

# Now we can create a model and send it at once to the device
model = ManualLinearRegression().to(device)
# We can also inspect its parameters using its state_dict
print(model.state_dict())

lr = 1e-1
n_epochs = 1000
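With the model on the device, the training loop might look like the sketch below; loss_fn and optimizer follow the MSELoss and SGD snippets further down, and x_train_tensor / y_train_tensor are the training tensors (all of these names are assumed, not part of this snippet):

import torch.nn as nn
import torch.optim as optim

loss_fn = nn.MSELoss(reduction='mean')
optimizer = optim.SGD(model.parameters(), lr=lr)

for epoch in range(n_epochs):
    model.train()                         # sets the model to training mode
    yhat = model(x_train_tensor)          # forward pass
    loss = loss_fn(yhat, y_train_tensor)  # computes the loss
    loss.backward()                       # computes gradients
    optimizer.step()                      # updates the parameters
    optimizer.zero_grad()                 # clears gradients for the next epoch

print(model.state_dict())
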
import torch
import torch.nn as nn

class ManualLinearRegression(nn.Module):
    def __init__(self):
        super().__init__()
        # To make "a" and "b" real parameters of the model, we need to wrap them with nn.Parameter
        self.a = nn.Parameter(torch.randn(1, requires_grad=True, dtype=torch.float))
        self.b = nn.Parameter(torch.randn(1, requires_grad=True, dtype=torch.float))

    def forward(self, x):
        # Computes the outputs / predictions
        return self.a + self.b * x
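Because a and b are wrapped in nn.Parameter, they show up automatically in parameters() and state_dict(); a minimal check (dummy is a hypothetical name):

dummy = ManualLinearRegression()
print(list(dummy.parameters()))  # two one-element tensors, both with requires_grad=True
print(dummy.state_dict())        # OrderedDict with keys 'a' and 'b'
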
torch.manual_seed(42)
a = torch.randn(1, requires_grad=True, dtype=torch.float, device=device)
b = torch.randn(1, requires_grad=True, dtype=torch.float, device=device)
print(a, b)

lr = 1e-1
n_epochs = 1000

# Defines an MSE loss function
loss_fn = nn.MSELoss(reduction='mean')
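The returned loss_fn is a callable; a sketch of how it replaces the hand-written MSE, assuming x_train_tensor and y_train_tensor from the other snippets:

yhat = a + b * x_train_tensor
loss = loss_fn(yhat, y_train_tensor)  # same value as ((y_train_tensor - yhat) ** 2).mean()
print(loss)
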
torch.manual_seed(42)
a = torch.randn(1, requires_grad=True, dtype=torch.float, device=device)
b = torch.randn(1, requires_grad=True, dtype=torch.float, device=device)
print(a, b)

lr = 1e-1
n_epochs = 1000

# Defines an SGD optimizer to update the parameters
optimizer = optim.SGD([a, b], lr=lr)
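Inside the training loop, the optimizer takes over both the manual parameter update and the manual zeroing of gradients; a sketch, assuming loss_fn is the MSELoss from the previous snippet and x_train_tensor / y_train_tensor are the training tensors:

for epoch in range(n_epochs):
    yhat = a + b * x_train_tensor
    loss = loss_fn(yhat, y_train_tensor)
    loss.backward()

    optimizer.step()       # replaces the manual "a -= lr * a.grad" updates
    optimizer.zero_grad()  # replaces the manual a.grad.zero_() / b.grad.zero_()

print(a, b)
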
torch.manual_seed(42)
a = torch.randn(1, requires_grad=True, dtype=torch.float, device=device)
b = torch.randn(1, requires_grad=True, dtype=torch.float, device=device)

yhat = a + b * x_train_tensor
error = y_train_tensor - yhat
loss = (error ** 2).mean()
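Since a and b were created with requires_grad=True, calling backward() on this loss populates their grad attributes; a short check:

loss.backward()
print(a.grad)  # d(loss)/da
print(b.grad)  # d(loss)/db
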
lr = 1e-1
n_epochs = 1000
torch.manual_seed(42)
a = torch.randn(1, requires_grad=True, dtype=torch.float, device=device)
b = torch.randn(1, requires_grad=True, dtype=torch.float, device=device)
for epoch in range(n_epochs):
    yhat = a + b * x_train_tensor
    error = y_train_tensor - yhat
    loss = (error ** 2).mean()
    loss.backward()          # computes gradients for "a" and "b"
    with torch.no_grad():    # the update itself must stay outside the autograd graph
        a -= lr * a.grad
        b -= lr * b.grad
    a.grad.zero_()           # clears gradients for the next epoch
    b.grad.zero_()

# We can specify the device at the moment of creation - RECOMMENDED!
torch.manual_seed(42)
a = torch.randn(1, requires_grad=True, dtype=torch.float, device=device)
b = torch.randn(1, requires_grad=True, dtype=torch.float, device=device)
print(a, b)
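For contrast, a sketch of the pitfall this recommendation avoids, assuming device points at a GPU: creating the tensor with requires_grad=True first and only then sending it to the device makes the device copy a non-leaf tensor, so its .grad would not be populated during backward().

# What NOT to do: .to(device) on a tensor that already requires grad
a_wrong = torch.randn(1, requires_grad=True, dtype=torch.float).to(device)
print(a_wrong.is_leaf)  # False (when device is a GPU)

# Workable alternative: send first, then turn gradient tracking on in place
a_ok = torch.randn(1, dtype=torch.float).to(device).requires_grad_()
print(a_ok.is_leaf)     # True
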