# Gist by @ground0state, created August 27, 2019.
import torch
from torch import nn, optim
from torch.utils.data import TensorDataset, DataLoader
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_digits

# Load the 8x8 digits dataset (64 features, 10 classes).
digits = load_digits()
X = digits.data
Y = digits.target
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.3)
# Use the GPU when available, otherwise fall back to CPU.
device = "cuda:0" if torch.cuda.is_available() else "cpu"
X_train = torch.tensor(X_train, dtype=torch.float32).to(device)
Y_train = torch.tensor(Y_train, dtype=torch.int64).to(device)
X_test = torch.tensor(X_test, dtype=torch.float32).to(device)
Y_test = torch.tensor(Y_test, dtype=torch.int64).to(device)

# Wrap the training tensors in a Dataset and a shuffled mini-batch loader.
ds = TensorDataset(X_train, Y_train)
loader = DataLoader(ds, batch_size=32, shuffle=True)
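# Sanity check (added; not in the original gist): pull one batch to
# confirm the shapes the network below expects.
xx, yy = next(iter(loader))
print(xx.shape, yy.shape)  # e.g. torch.Size([32, 64]) torch.Size([32])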
net = nn.Sequential(
    nn.Linear(64, 100),
    nn.ReLU(),
    nn.Dropout(0.5),
    nn.Linear(100, 100),
    nn.ReLU(),
    nn.Dropout(0.5),
    nn.Linear(100, 100),
    nn.ReLU(),
    nn.BatchNorm1d(100),
    nn.Linear(100, 100),
    nn.ReLU(),
    nn.BatchNorm1d(100),
    nn.Linear(100, 10)
)
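# Quick size check (added; not in the original gist): count the
# model's trainable parameters.
n_params = sum(p.numel() for p in net.parameters() if p.requires_grad)
print(f"trainable parameters: {n_params}")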
net.to("cuda:0")
loss_fn = nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters())
train_losses = []
test_losses = []
for epoch in range(100):
    running_loss = 0.0
    net.train()  # enable dropout and batch-norm running-stat updates
    for i, (xx, yy) in enumerate(loader):
        y_pred = net(xx)
        loss = loss_fn(y_pred, yy)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    # Average over the i + 1 batches seen this epoch.
    train_losses.append(running_loss / (i + 1))

    # Score the held-out split with dropout and batch norm frozen.
    net.eval()
    with torch.no_grad():
        y_pred = net(X_test)
        test_loss = loss_fn(y_pred, Y_test)
    test_losses.append(test_loss.item())
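# Evaluation sketch (added; not in the original gist): test-set
# accuracy from the trained model's argmax predictions.
net.eval()
with torch.no_grad():
    preds = net(X_test).argmax(dim=1)
accuracy = (preds == Y_test).float().mean().item()
print(f"test accuracy: {accuracy:.3f}")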
# Reusable block: Linear -> ReLU -> Dropout.
class CustomLinear(nn.Module):
    def __init__(self, in_features, out_features, bias=True, p=0.5):
        super().__init__()
        self.linear = nn.Linear(in_features, out_features, bias)
        self.relu = nn.ReLU()
        self.drop = nn.Dropout(p)

    def forward(self, x):
        x = self.linear(x)
        x = self.relu(x)
        x = self.drop(x)
        return x
mlp = nn.Sequential(
    CustomLinear(64, 200),
    CustomLinear(200, 200),
    CustomLinear(200, 200),
    nn.Linear(200, 10)
)
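# Shape check (added; not in the original gist): the stacked blocks
# should map 64 input features to 10 logits. Note this `mlp` is
# immediately redefined by the MyMLP version below.
print(mlp(torch.zeros(1, 64)).shape)  # torch.Size([1, 10])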
# Equivalent model defined by subclassing nn.Module directly.
# Note: unlike the Sequential above, the final layer here is also a
# CustomLinear, so ReLU and dropout are applied to the output logits.
class MyMLP(nn.Module):
    def __init__(self, in_features, out_features):
        super().__init__()
        self.ln1 = CustomLinear(in_features, 200)
        self.ln2 = CustomLinear(200, 200)
        self.ln3 = CustomLinear(200, 200)
        self.ln4 = CustomLinear(200, out_features)

    def forward(self, x):
        x = self.ln1(x)
        x = self.ln2(x)
        x = self.ln3(x)
        x = self.ln4(x)
        return x

mlp = MyMLP(64, 10)
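# Usage sketch (added; not part of the original gist): the subclassed
# model trains the same way as `net` above; here is a forward-pass
# shape check on the test split.
mlp.to(device)
mlp.eval()
with torch.no_grad():
    print(mlp(X_test).shape)  # expected: torch.Size([n_test, 10])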