@zou3519
Created October 4, 2017 21:50
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable


class MNISTConvNet(nn.Module):
    def __init__(self):
        super(MNISTConvNet, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, 5)
        self.pool1 = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(10, 20, 5)
        self.pool2 = nn.MaxPool2d(2, 2)
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 10)
        self.cuda(0)  # compute on GPU

    def forward(self, input):
        x = self.pool1(F.relu(self.conv1(input)))
        x = self.pool2(F.relu(self.conv2(x)))
        x = x.view(x.size(0), -1)  # flatten to (batch, 320)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return x


def main():
    # Run the forward pass and the device-to-host copy on a side CUDA stream.
    with torch.cuda.stream(torch.cuda.Stream()):
        n = 100
        net = MNISTConvNet()
        print(net)
        input = Variable(torch.randn(n, 1, 28, 28).cuda(0))
        # Pinned (page-locked) host memory is required for an asynchronous copy.
        out = torch.FloatTensor(n, 10).pin_memory()
        # 'async=' was renamed to 'non_blocking=' in later PyTorch releases.
        out.copy_(net(input).data, async=True)
        torch.cuda.synchronize()  # wait for the copy to finish before reading
        print(out.size())


if __name__ == '__main__':
    main()
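
The snippet above uses the pre-0.4 API (Variable, the async= keyword). Below is a minimal sketch of the same device-to-host async-copy pattern against the current PyTorch API, assuming PyTorch >= 1.0 and a CUDA device; async_copy_to_host is a hypothetical helper name, not part of any library.

import torch

def async_copy_to_host(gpu_tensor):
    # Hypothetical helper: copy a GPU tensor into pinned host memory on a side stream.
    host_buf = torch.empty(gpu_tensor.size(), dtype=gpu_tensor.dtype).pin_memory()
    stream = torch.cuda.Stream()
    with torch.cuda.stream(stream):
        # non_blocking=True replaces the old async=True flag.
        host_buf.copy_(gpu_tensor, non_blocking=True)
    stream.synchronize()  # block until the copy has completed
    return host_buf

if __name__ == '__main__':
    x = torch.randn(100, 10, device='cuda:0')
    print(async_copy_to_host(x).size())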