@conormm
Created February 28, 2018 21:59
PyTorch regression example on the Boston housing dataset.
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
import numpy as np
from sklearn.datasets import load_boston
from sklearn.preprocessing import StandardScaler

X, y = load_boston(return_X_y=True)

input_size = 13
hidden_layer_size = 300
learning_rate = 0.05
batch_size = 50
num_epochs = 50


class PrepareData(Dataset):
    """Wraps the numpy arrays as a Dataset, optionally standardising X."""

    def __init__(self, X, y, scale_X=True):
        if not torch.is_tensor(X):
            if scale_X:
                X = StandardScaler().fit_transform(X)
            X = torch.from_numpy(X)
        if not torch.is_tensor(y):
            y = torch.from_numpy(y)
        # store as float tensors so no per-batch casting is needed
        self.X = X.float()
        self.y = y.float()

    def __len__(self):
        return len(self.X)

    def __getitem__(self, idx):
        return self.X[idx], self.y[idx]


ds = PrepareData(X, y=y, scale_X=True)
datagen = DataLoader(ds, batch_size=batch_size, shuffle=True)


class RegressionModel(nn.Module):
    """Single-hidden-layer MLP with a ReLU activation and one regression output."""

    def __init__(self, input_size, hidden_size):
        super(RegressionModel, self).__init__()
        self.dense_h1 = nn.Linear(in_features=input_size, out_features=hidden_size)
        self.relu_h1 = nn.ReLU()
        self.dense_out = nn.Linear(in_features=hidden_size, out_features=1)

    def forward(self, X):
        out = self.relu_h1(self.dense_h1(X))
        out = self.dense_out(out)
        return out


m = RegressionModel(input_size=input_size, hidden_size=hidden_layer_size)
cost_func = nn.MSELoss()
optimizer = torch.optim.Adam(m.parameters(), lr=learning_rate)

for e in range(num_epochs):
    batch_losses = []
    for ix, (Xb, yb) in enumerate(datagen):
        # ========== forward pass ==========
        preds = m(Xb)
        # unsqueeze targets to (batch, 1) so they match the model output shape
        loss = cost_func(preds, yb.unsqueeze(1))
        # ========== backward pass ==========
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        batch_losses.append(loss.item())
    mbl = np.mean(batch_losses).round(3)
    if e % 5 == 0:
        print("Epoch [{}/{}], Batch loss: {}".format(e, num_epochs, mbl))