Rahul Agarwal (MLWhiz), GitHub gists
import torch
from torch import nn

class myClassificationNet(nn.Module):
    def __init__(self):
        super().__init__()
        # Define all Layers Here
        self.lin = nn.Linear(784, 10)
        self.logsoftmax = nn.LogSoftmax(dim=1)

    def forward(self, x):
        # Connect the layer Outputs here to define the forward pass
        x = self.lin(x)
        x = self.logsoftmax(x)
        return x
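As a quick sanity check (illustrative, not part of the original gist), a dummy batch of flattened 28x28 images can be pushed through the network to confirm the output shape:

model = myClassificationNet()
dummy_batch = torch.randn(32, 784)   # 32 flattened 28x28 images
print(model(dummy_batch).shape)      # torch.Size([32, 10]) log-probabilities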
num_epochs = 5
for epoch in range(num_epochs):
    # Set model to train mode
    model.train()
    for x_batch, y_batch in train_dataloader:
        # Clear gradients
        optimizer.zero_grad()
        # Forward pass - Predicted outputs
        pred = model(x_batch)
        # Find Loss and backpropagate gradients
        # (criterion is assumed to be a loss function such as nn.NLLLoss, defined beforehand)
        loss = criterion(pred, y_batch)
        loss.backward()
        # Update the weights
        optimizer.step()
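The loop above assumes that model, criterion, optimizer, and train_dataloader already exist; here is a minimal, illustrative setup (the names and choices are assumptions, not from the original snippet):

from torch import nn, optim
from torch.utils.data import DataLoader, TensorDataset

model = myClassificationNet()
criterion = nn.NLLLoss()                              # pairs with the LogSoftmax output
optimizer = optim.Adam(model.parameters(), lr=1e-3)
# Dummy tensors just to make the loop runnable end to end
train_dataloader = DataLoader(
    TensorDataset(torch.randn(256, 784), torch.randint(0, 10, (256,))),
    batch_size=32, shuffle=True)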
import numpy as np

train_data_size = 1024
# Random sequence lengths between 50 and 300
sizes = np.random.randint(low=50, high=300, size=(train_data_size,))
# Each example is a variable-length sequence of integer token ids
X = [np.random.randint(0, 10000, size=sizes[i]) for i in range(train_data_size)]
# Binary labels (0.0 or 1.0)
y = np.random.rand(train_data_size).round()
# Checking one example in the dataset
print((X[0], y[0]))
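A minimal sketch (an assumption, not part of the original gist) of how this variable-length data could be wrapped in a torch.utils.data.Dataset; the class name SequenceDataset is hypothetical:

import torch
from torch.utils.data import Dataset

class SequenceDataset(Dataset):
    def __init__(self, X, y):
        self.X, self.y = X, y
    def __len__(self):
        return len(self.y)
    def __getitem__(self, idx):
        # One variable-length sequence of token ids and its label, as tensors
        return torch.tensor(self.X[idx], dtype=torch.long), torch.tensor(self.y[idx])

seq_dataset = SequenceDataset(X, y)
print(len(seq_dataset), seq_dataset[0][0].shape)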
from torchvision import transforms
from torchvision.datasets import ImageFolder

traindir = "data/train/"
# Resize, center-crop and convert the images to tensors
t = transforms.Compose([
    transforms.Resize(size=256),
    transforms.CenterCrop(size=224),
    transforms.ToTensor()])
train_dataset = ImageFolder(root=traindir, transform=t)
print("Num Images in Dataset:", len(train_dataset))
print("Example Image and Label:", train_dataset[2])
import torch
from torch import nn

class myCustomNeuralNet(nn.Module):
    def __init__(self):
        super().__init__()
        # Define all Layers Here (myCustomLinearLayer is defined in the next snippet)
        self.lin1 = myCustomLinearLayer(784, 10)

    def forward(self, x):
        # Connect the layer Outputs here to define the forward pass
        x = self.lin1(x)
        return x
import torch
from torch import nn

class myCustomLinearLayer(nn.Module):
    def __init__(self, in_size, out_size):
        super().__init__()
        # Learnable weight matrix and bias vector, registered as Parameters
        self.weights = nn.Parameter(torch.randn(in_size, out_size))
        self.bias = nn.Parameter(torch.zeros(out_size))

    def forward(self, x):
        # y = xW + b
        return x.mm(self.weights) + self.bias
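A quick, illustrative check that the custom layer behaves like nn.Linear as far as shapes go, and that its tensors are registered as learnable parameters:

layer = myCustomLinearLayer(784, 10)
out = layer(torch.randn(32, 784))
print(out.shape)                              # torch.Size([32, 10])
print([p.shape for p in layer.parameters()])  # [torch.Size([784, 10]), torch.Size([10])]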
import torch
from torch import nn

class myCrazyNeuralNet(nn.Module):
    def __init__(self):
        super().__init__()
        # Define all Layers Here
        self.lin1 = nn.Linear(784, 30)
        self.lin2 = nn.Linear(30, 784)
        self.lin3 = nn.Linear(30, 10)
    def forward(self, x):
        # Connect the layer Outputs here to define the forward pass
        x_lin1 = self.lin1(x)       # 784 -> 30
        x_lin2 = self.lin2(x_lin1)  # 30 -> 784
        x_lin2 = x + x_lin2         # skip connection with the input
        x_lin2 = self.lin1(x_lin2)  # reuse lin1: 784 -> 30
        x = self.lin3(x_lin2)       # 30 -> 10
        return x
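A shape check (illustrative, not from the original gist) confirming that the reused layers and the skip connection above are dimensionally consistent:

net = myCrazyNeuralNet()
print(net(torch.randn(16, 784)).shape)   # torch.Size([16, 10])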
import torch
from torch import nn

class myNeuralNet(nn.Module):
    def __init__(self):
        super().__init__()
        # Define all Layers Here
        self.lin1 = nn.Linear(784, 30)
        self.lin2 = nn.Linear(30, 10)

    def forward(self, x):
        # Connect the layer Outputs here to define the forward pass
        x = self.lin1(x)
        x = self.lin2(x)
        return x
import torch

A = torch.randn(3, 4)
W = torch.randn(4, 2)
# Multiply Matrix A and W
t = A.mm(W)
print(f"Created Tensor t by Multiplying A and W:\n{t}")
# Transpose Tensor t
t = t.t()
print(f"Transpose of Tensor t:\n{t}")
# Square each element of t
t = t**2
print(f"Square each element of t:\n{t}")
# Using torch.Tensor
t = torch.Tensor([[1,2,3],[3,4,5]])
print(f"Created Tensor Using torch.Tensor:\n{t}")
# Using torch.randn
t = torch.randn(3, 5)
print(f"Created Tensor Using torch.randn:\n{t}")
# using torch.[ones|zeros](*size)
t = torch.ones(3, 5)