Skip to content

Instantly share code, notes, and snippets.

View ravishchawla's full-sized avatar

Ravish Chawla ravishchawla

View GitHub Profile
@ravishchawla
ravishchawla / data_augmentation.py
Created March 20, 2020 21:19
quora_data_augmentation
# Build a synonym lookup table from nearest neighbours in embedding space.
# Fit on the full embedding matrix, then query rows 1..top_k (row 0 is
# skipped — presumably a padding/unknown token; confirm against the
# tokenizer). For each queried word the closest hit is the word itself
# (distance 0), so it becomes the dict key and the remaining
# `total_syns` indices are stored as its synonyms.
# (Fix: removed un-Pythonic trailing semicolons.)
nearest_syns = NearestNeighbors(n_neighbors=total_syns + 1).fit(embeddings_matrix)
neighbours_mat = nearest_syns.kneighbors(embeddings_matrix[1:top_k])[1]
synonyms = {x[0]: x[1:] for x in neighbours_mat}
def augment_sentence(encoded_sentence, prob = 0.5):
    # Data augmentation: walk an encoded sentence and, at random positions,
    # look up nearest-neighbour synonyms from the module-level `synonyms`
    # table and pick one at random.
    #
    # encoded_sentence: sequence of integer word indices — presumably the
    #     output of the tokenizer used elsewhere in this gist; verify.
    # prob: per-position threshold; a position is considered for replacement
    #     only when random.random() > prob, i.e. roughly (1 - prob) of
    #     positions on average.
    #
    # NOTE(review): this snippet appears truncated by the page scrape — the
    # `try` has no visible `except` clause and the function never returns
    # anything in the visible portion. Recover the full gist before reuse.
    for posit in range(len(encoded_sentence)):
        if random.random() > prob:
            try:
                # Words with no entry in `synonyms` raise KeyError here,
                # presumably handled by the missing `except` clause.
                syns = synonyms[encoded_sentence[posit]];
                rand_syn = np.random.choice(syns);
@ravishchawla
ravishchawla / quora_torch_model
Created March 25, 2020 21:05
torch_model_basic.py
class Model(nn.Module):
    """Text model initialised from pre-trained word embeddings.

    Only the embedding-layer setup is visible in this snippet; downstream
    layers (which presumably consume ``hidden_unit``) are not shown.

    Parameters
    ----------
    embedding_matrix : torch.Tensor of shape (vocab_size, embedding_dim)
        Pre-trained embedding weights used to initialise the embedding layer.
    hidden_unit : int, default 64
        Hidden size for downstream layers (unused in the visible portion).
    """

    def __init__(self, embedding_matrix, hidden_unit=64):
        super(Model, self).__init__()
        # Bug fix: the original read a global `embeddings_tensor` instead of
        # the `embedding_matrix` argument, silently ignoring the parameter.
        vocab_size = embedding_matrix.shape[0]
        embedding_dim = embedding_matrix.shape[1]
        self.embedding_layer = nn.Embedding(vocab_size, embedding_dim)
        # Seed the layer with the pre-trained weights and fine-tune them
        # (requires_grad=True keeps the embeddings trainable).
        self.embedding_layer.weight = nn.Parameter(embedding_matrix)
        self.embedding_layer.weight.requires_grad = True
@ravishchawla
ravishchawla / quora_torch_model.py
Created March 25, 2020 21:06
Torch Model Basic
class Model(nn.Module):
    """Text model initialised from pre-trained word embeddings.

    Only the embedding-layer setup is visible in this snippet; downstream
    layers (which presumably consume ``hidden_unit``) are not shown.

    Parameters
    ----------
    embedding_matrix : torch.Tensor of shape (vocab_size, embedding_dim)
        Pre-trained embedding weights used to initialise the embedding layer.
    hidden_unit : int, default 64
        Hidden size for downstream layers (unused in the visible portion).
    """

    def __init__(self, embedding_matrix, hidden_unit=64):
        super(Model, self).__init__()
        # Bug fix: the original read a global `embeddings_tensor` instead of
        # the `embedding_matrix` argument, silently ignoring the parameter.
        vocab_size = embedding_matrix.shape[0]
        embedding_dim = embedding_matrix.shape[1]
        self.embedding_layer = nn.Embedding(vocab_size, embedding_dim)
        # Seed the layer with the pre-trained weights and fine-tune them
        # (requires_grad=True keeps the embeddings trainable).
        self.embedding_layer.weight = nn.Parameter(embedding_matrix)
        self.embedding_layer.weight.requires_grad = True
@ravishchawla
ravishchawla / quora_model_training.py
Last active March 25, 2020 21:10
Torch Model Training
def train(nn_model, nn_optimizer, nn_criterion, data_loader, val_loader = None, num_epochs = 5, print_ratio = 0.1, verbose=True):
    # Epoch-based training loop for a PyTorch model.
    #
    # nn_model: torch.nn.Module to train (switched to train mode each epoch).
    # nn_optimizer: optimizer stepping the model's parameters.
    # nn_criterion: loss function.
    # data_loader: iterable yielding (x, y, l) batches — presumably
    #     (inputs, labels, sequence lengths); confirm against the caller's
    #     collate function.
    # val_loader: optional validation loader (usage not visible here).
    # num_epochs: number of full passes over data_loader.
    # print_ratio: presumably the fraction of batches between progress
    #     prints; usage not visible in this snippet.
    # verbose: presumably toggles progress printing; usage not visible here.
    #
    # NOTE(review): the body is truncated by the page scrape — the batch
    # loop ends right after the timing line. Recover the full gist before
    # reuse.
    for epoch in range(num_epochs):
        # Enable Training for the model
        nn_model.train()
        # Accumulates the epoch's loss (accumulation code not visible here).
        running_loss = 0;
        for ite, (x, y, l) in enumerate(data_loader):
            # Per-batch wall-clock start, presumably for throughput logging.
            init_time = time.time();
            # Convert our tensors to GPU tensors
Model Configuration Number of Epochs Training F1 Score Validation F1 Score
2 LSTM Layers with 64 Hidden Units 5 epochs 0.637 0.540
2 LSTM Layers with 128 Hidden Units 5 epochs 0.625 0.567
2 LSTM Layers with 128 Hidden Units 15 epochs 0.661 0.558
1 LSTM Layer and a Self Attention Layer 5 epochs 0.771 0.601