@f0nzie
Created June 11, 2019 04:41
PyTorch - Linear Regression in Python
# One chunk - Linear Regression in Python
Source: https://medium.com/dsnet/linear-regression-with-pytorch-3dde91d60b50
Notebook: https://jvn.io/aakashns/e556978bda9343f3b30b3a9fd2a25012
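The Python chunk below builds the whole example in one pass: a linear model that maps the three inputs (temperature, rainfall, humidity) to the two targets (apples, oranges), trained by gradient descent on the mean squared error. In matrix form, this is what the code implements:

$$\hat{Y} = X W^\top + b, \qquad \text{MSE}(\hat{Y}, Y) = \frac{1}{N} \sum_{i,j} \left( \hat{Y}_{ij} - Y_{ij} \right)^2$$

where $N$ is the total number of elements in the target matrix (here $5 \times 2 = 10$), matching the `diff.numel()` divisor in the code.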
```{r}
library(reticulate)
use_condaenv("pytorch37")
py_config()
```
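A quick sanity check from R before running the Python chunk can save some confusion (a minimal sketch; it assumes PyTorch is actually installed in the `pytorch37` conda environment):

```{r}
# confirm that the selected environment can import torch
py_run_string("import torch; print(torch.__version__)")
```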
```{python}
import numpy as np
import torch

torch.manual_seed(0)  # reproducible

# Input (temp, rainfall, humidity)
inputs = np.array([[73, 67, 43],
                   [91, 88, 64],
                   [87, 134, 58],
                   [102, 43, 37],
                   [69, 96, 70]], dtype='float32')

# Targets (apples, oranges)
targets = np.array([[56, 70],
                    [81, 101],
                    [119, 133],
                    [22, 37],
                    [103, 119]], dtype='float32')

# Convert inputs and targets to tensors
inputs = torch.from_numpy(inputs)
targets = torch.from_numpy(targets)
print(inputs)
print(targets)
# Random weights and biases
w = torch.randn(2, 3, requires_grad=True)
b = torch.randn(2, requires_grad=True)
print(w)
print(b)

# Linear model: x @ w.t() + b  (@ is the matrix-multiplication operator)
def model(x):
    return x @ w.t() + b
# Generate predictions
preds = model(inputs)
print(preds)
print(targets)

# MSE loss function
def mse(t1, t2):
    diff = t1 - t2
    return torch.sum(diff * diff) / diff.numel()

# Compute loss
loss = mse(preds, targets)
print(loss)
# 33060
# Compute gradients
loss.backward()

# Gradients for the weights
print(w)
print(w.grad)

# Reset the gradients
w.grad.zero_()
b.grad.zero_()
print(w.grad)
print(b.grad)

# Generate predictions again
preds = model(inputs)
print(preds)

# Calculate the loss
loss = mse(preds, targets)
print(loss)

# Compute gradients
loss.backward()
print(w.grad)
print(b.grad)

# Adjust weights and reset gradients (no_grad so the update itself is not tracked)
with torch.no_grad():
    print(w)
    print(b)  # the requires_grad attribute remains
    w -= w.grad * 1e-5
    b -= b.grad * 1e-5
    w.grad.zero_()
    b.grad.zero_()
print(w)
print(b)

# Calculate loss after one update step
preds = model(inputs)
loss = mse(preds, targets)
print(loss)
# Train for 100 epochs
for i in range(100):
    preds = model(inputs)
    loss = mse(preds, targets)
    loss.backward()
    with torch.no_grad():
        w -= w.grad * 1e-5
        b -= b.grad * 1e-5
        w.grad.zero_()
        b.grad.zero_()

# Calculate the loss after training
preds = model(inputs)
loss = mse(preds, targets)
print(loss)

# Predictions vs. targets
print(preds)
print(targets)
```
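Because everything runs through reticulate, the fitted objects are reachable from R via the `py` proxy once the chunk above has executed (a minimal sketch; the object names come from the Python chunk, and `detach()$numpy()` relies on reticulate's default conversion of NumPy arrays to R matrices):

```{r}
# pull the results back into R
final_loss <- py$loss$item()             # scalar loss as an R numeric
preds_r    <- py$preds$detach()$numpy()  # 5 x 2 matrix of predictions
final_loss
preds_r
```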