Created April 5, 2021 14:36
Run CIFAR
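Running the script with Python 3 (it needs torch and torchvision) downloads CIFAR-10 to ./data on the first run, trains the small network below for two epochs on the CPU, prints the elapsed time alongside the detected CPU model, and saves the trained weights to ./cifar_net.pth.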
#!/usr/bin/env python3
# Results of recent runs:
# Mac Apple M1 in 50.3 sec
# Mac Intel(R) Core(TM) i9-9980HK CPU @ 2.40GHz in 61.1 sec
# Linux Intel(R) Xeon(R) W-2135 CPU @ 3.70GHz in 53.5 sec
import time

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
def get_cpu_name():
    # Return a human-readable CPU model string for the timing report below.
    import platform
    from subprocess import check_output
    if platform.system() == "Darwin":
        return check_output(["sysctl", "-n", "machdep.cpu.brand_string"]).decode("latin").strip()
    if platform.system() == "Linux":
        for line in check_output(["cat", "/proc/cpuinfo"]).decode("latin").split("\n"):
            if "model name" in line:
                return line.split(":", 1)[1].strip()
    return platform.processor()
class Net(nn.Module):
    # Small LeNet-style CNN: two conv/pool stages followed by three fully connected layers.
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        # 32x32 input -> 28 (conv1) -> 14 (pool) -> 10 (conv2) -> 5 (pool), hence 16 * 5 * 5
        x = x.view(-1, 16 * 5 * 5)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x
def main():
    transform = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

    trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
                                            download=True, transform=transform)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=4,
                                              shuffle=True, num_workers=2)

    testset = torchvision.datasets.CIFAR10(root='./data', train=False,
                                           download=True, transform=transform)
    testloader = torch.utils.data.DataLoader(testset, batch_size=4,
                                             shuffle=False, num_workers=2)

    net = Net()
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

    start_time = time.time()
    for epoch in range(2):  # loop over the dataset multiple times
        running_loss = 0.0
        for i, data in enumerate(trainloader, 0):
            # get the inputs; data is a list of [inputs, labels]
            inputs, labels = data

            # zero the parameter gradients
            optimizer.zero_grad()

            # forward + backward + optimize
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            # print statistics
            running_loss += loss.item()
            if i % 2000 == 1999:  # print every 2000 mini-batches
                print('[%d, %5d] loss: %.3f' %
                      (epoch + 1, i + 1, running_loss / 2000))
                running_loss = 0.0
    end_time = time.time()

    print(f'Finished Training on {get_cpu_name()} in {end_time - start_time:.2f} seconds')
    PATH = './cifar_net.pth'
    torch.save(net.state_dict(), PATH)


if __name__ == '__main__':
    main()
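The script builds a testloader but never reads from it. Below is a minimal sketch of the follow-up accuracy check; it assumes the Net class above is in scope (e.g. the code is appended to the same file), that ./cifar_net.pth exists from a prior run, and the evaluate name is invented here.

def evaluate(path='./cifar_net.pth'):
    # Same normalization as training
    transform = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
    testset = torchvision.datasets.CIFAR10(root='./data', train=False,
                                           download=True, transform=transform)
    testloader = torch.utils.data.DataLoader(testset, batch_size=4,
                                             shuffle=False, num_workers=2)

    # Rebuild the model and load the weights saved at the end of main()
    net = Net()
    net.load_state_dict(torch.load(path))
    net.eval()

    correct = total = 0
    with torch.no_grad():  # no gradients needed for evaluation
        for images, labels in testloader:
            outputs = net(images)
            _, predicted = torch.max(outputs, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    print(f'Test accuracy: {100 * correct / total:.2f}%')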