import torch

# Using torch.Tensor
t = torch.Tensor([[1, 2, 3], [3, 4, 5]])
print(f"Created Tensor Using torch.Tensor:\n{t}")

# Using torch.randn
t = torch.randn(3, 5)
print(f"Created Tensor Using torch.randn:\n{t}")

# Using torch.[ones|zeros](*size)
t = torch.ones(3, 5)
print(f"Created Tensor Using torch.ones:\n{t}")
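Incidentally, `torch.Tensor` always constructs a tensor of the default float type, while the `torch.tensor` factory infers the dtype from its data; a quick sketch of the difference:

import torch

print(torch.Tensor([1, 2, 3]).dtype)     # torch.float32 (default tensor type)
print(torch.tensor([1, 2, 3]).dtype)     # torch.int64 (inferred from ints)
print(torch.tensor([1., 2., 3.]).dtype)  # torch.float32 (inferred from floats)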
import numpy as np
import torch

def collate_text(batch):
    # get text sequences in batch
    data = [item[0] for item in batch]
    # get labels in batch
    target = [item[1] for item in batch]
    # get max_seq_length in batch
    max_seq_len = max([len(x) for x in data])
    # pad text sequences based on max_seq_len
    data = [np.pad(p, (0, max_seq_len - len(p)), 'constant') for p in data]
    # convert data and target to tensor and return them (assumed completion of the truncated gist)
    data = torch.LongTensor(np.array(data))
    target = torch.LongTensor(target)
    return data, target
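To use this pad-to-longest-in-batch collate function, hand it to the DataLoader via `collate_fn`; a minimal sketch, assuming `train_dataset` yields `(token_ids, label)` pairs:

from torch.utils.data import DataLoader

train_dataloader = DataLoader(train_dataset, batch_size=64, collate_fn=collate_text)
for data, target in train_dataloader:
    # every sequence is padded only to this batch's longest sequence
    print(data.shape)
    break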
from torch.utils.data import Dataset

class CustomTextDataset(Dataset):
    '''
    Simple Dataset that initializes with X and y vectors.
    We start by sorting our X and y vectors by sequence lengths.
    '''
    def __init__(self, X, y=None):
        self.data = list(zip(X, y))
        # Sort by length of first element in tuple
        self.data = sorted(self.data, key=lambda x: len(x[0]))

    # standard Dataset protocol (assumed completion of the truncated gist)
    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        return self.data[idx]
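Putting the two pieces together with small hypothetical `X` (lists of token ids) and `y` (labels): because the dataset is sorted by length, neighbouring samples have similar lengths, so each batch needs minimal padding.

from torch.utils.data import DataLoader

X = [[1, 2, 3], [4, 5], [6, 7, 8, 9], [10]]
y = [0, 1, 1, 0]

dataset = CustomTextDataset(X, y)
loader = DataLoader(dataset, batch_size=2, collate_fn=collate_text)
for data, target in loader:
    print(data, target)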
import torch
import torch.nn as nn

class BiLSTM(nn.Module):
    def __init__(self):
        super().__init__()
        self.hidden_size = 64
        drp = 0.1
        max_features, embed_size = 10000, 300
        self.embedding = nn.Embedding(max_features, embed_size)
        self.lstm = nn.LSTM(embed_size, self.hidden_size, bidirectional=True, batch_first=True)
        # pooled bidirectional outputs are concatenated below, hence hidden_size*4
        self.linear = nn.Linear(self.hidden_size * 4, 64)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(drp)  # assumed: uses the drp rate defined above
        self.out = nn.Linear(64, 1)     # assumed: binary-classification head
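The gist is cut off before the forward pass. Since `self.linear` expects `hidden_size*4` features while the bidirectional LSTM emits `hidden_size*2` per step, the forward pass presumably concatenates average- and max-pooled LSTM outputs; a sketch under that assumption, with a dummy batch to check the shapes:

    def forward(self, x):
        h_embedding = self.embedding(x)            # (batch, seq_len, embed_size)
        h_lstm, _ = self.lstm(h_embedding)         # (batch, seq_len, 2*hidden_size)
        avg_pool = torch.mean(h_lstm, 1)           # (batch, 2*hidden_size)
        max_pool, _ = torch.max(h_lstm, 1)         # (batch, 2*hidden_size)
        conc = torch.cat((avg_pool, max_pool), 1)  # (batch, 4*hidden_size)
        conc = self.dropout(self.relu(self.linear(conc)))
        return self.out(conc)

model = BiLSTM()
dummy_batch = torch.randint(0, 10000, (32, 70))  # 32 sequences of 70 token ids
print(model(dummy_batch).shape)                  # torch.Size([32, 1])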
from torch.utils.data import DataLoader
from torchvision import transforms

t = transforms.Compose([
    transforms.Resize(size=256),
    transforms.CenterCrop(size=224),
    transforms.ToTensor()])

# traindir and myImageNeuralNet are assumed to be defined elsewhere
train_dataset = customImageFolderDataset(root=traindir, transform=t)
train_dataloader = DataLoader(train_dataset, batch_size=64, shuffle=True, num_workers=10)
for image_batch, label_batch in train_dataloader:
    pred = myImageNeuralNet(image_batch)
import torchvision

# the same pipeline, using the built-in torchvision ImageFolder dataset instead
t = transforms.Compose([
    transforms.Resize(size=256),
    transforms.CenterCrop(size=224),
    transforms.ToTensor()])

train_dataset = torchvision.datasets.ImageFolder(root=traindir, transform=t)
train_dataloader = DataLoader(train_dataset, batch_size=64, shuffle=True, num_workers=10)
for image_batch, label_batch in train_dataloader:
    pred = myImageNeuralNet(image_batch)
from glob import glob
from PIL import Image
from torch.utils.data import Dataset

class customImageFolderDataset(Dataset):
    """Custom Image Loader dataset."""
    def __init__(self, root, transform=None):
        """
        Args:
            root (string): Path to the images organized in a particular folder
                structure (assumed here: one subfolder per class, root/<class>/<image>).
            transform: optional torchvision transforms to apply to each image.
        """
        self.transform = transform
        self.image_paths = glob(f"{root}/*/*")
        classes = sorted({p.split("/")[-2] for p in self.image_paths})
        self.class_to_idx = {c: i for i, c in enumerate(classes)}
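The gist is truncated before the rest of the Dataset protocol; a minimal sketch of the two missing methods, under the folder-layout assumption noted in the docstring:

    def __len__(self):
        return len(self.image_paths)

    def __getitem__(self, idx):
        path = self.image_paths[idx]
        image = Image.open(path).convert("RGB")  # force 3 channels
        if self.transform:
            image = self.transform(image)
        return image, self.class_to_idx[path.split("/")[-2]]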
import torch

# SRC (a torchtext Field with a built vocab) and model are assumed to be defined elsewhere
def greeedy_decode_sentence(model, sentence):
    model.eval()
    sentence = SRC.preprocess(sentence)
    indexed = []
    for tok in sentence:
        if SRC.vocab.stoi[tok] != 0:
            indexed.append(SRC.vocab.stoi[tok])
        else:
            # out-of-vocabulary tokens map to index 0
            indexed.append(0)
    sentence = torch.LongTensor([indexed]).cuda()  # Variable() is no longer needed in modern PyTorch
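A usage sketch with a hypothetical input sentence; the truncated remainder of the function would run the token-by-token greedy decoding loop and return the translated string:

print(greeedy_decode_sentence(model, "The weather is nice today"))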
def train(train_iter, val_iter, model, optim, num_epochs, use_gpu=True):
    train_losses = []
    valid_losses = []
    for epoch in range(num_epochs):
        train_loss = 0
        valid_loss = 0
        # Train model
        model.train()
        for i, batch in enumerate(train_iter):
            src = batch.src.cuda() if use_gpu else batch.src
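A hedged usage sketch; Adam and these hyperparameters are illustrative assumptions, not necessarily the post's settings:

import torch

model = model.cuda() if torch.cuda.is_available() else model
optim = torch.optim.Adam(model.parameters(), lr=0.0001)
train(train_iter, val_iter, model, optim, num_epochs=10,
      use_gpu=torch.cuda.is_available())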
import math
import torch
import torch.nn as nn

class PositionalEncoding(nn.Module):
    def __init__(self, d_model, dropout=0.1, max_len=5000):
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)
        self.d_model = d_model
        # precompute sinusoidal encodings for every position up to max_len
        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
        pe[:, 0::2] = torch.sin(position * div_term)  # even dimensions: sine
        pe[:, 1::2] = torch.cos(position * div_term)  # odd dimensions: cosine
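The gist cuts off here; in the standard PyTorch transformer tutorial this module continues by registering `pe` as a buffer and adding it to the input in `forward` (input shape `(seq_len, batch, d_model)`):

        pe = pe.unsqueeze(0).transpose(0, 1)
        self.register_buffer('pe', pe)

    def forward(self, x):
        # add the fixed positional encodings to the input embeddings
        x = x + self.pe[:x.size(0), :]
        return self.dropout(x)

# usage sketch: inject position information into a batch of embeddings
pos_enc = PositionalEncoding(d_model=512)
embeddings = torch.zeros(35, 32, 512)  # (seq_len, batch, d_model)
print(pos_enc(embeddings).shape)       # torch.Size([35, 32, 512])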