Arulkumar InnovArul
@InnovArul
InnovArul / filter_learning.py
Created February 16, 2019 08:14
PyTorch forum (Model parameters are not being updated?)
from __future__ import print_function
import torch.nn.functional as F
# from torch.autograd import Variable
# import copy
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
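The preview stops at the imports. For context on the forum question, here is a minimal sketch (not the gist's actual code) of the most common cause: the optimizer must be constructed over the model's parameters, otherwise step() never updates them.

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

net = nn.Linear(4, 2)
# passing net.parameters() here is the step people miss
optimizer = optim.SGD(net.parameters(), lr=0.1)

x, y = torch.randn(8, 4), torch.randn(8, 2)
weight_before = net.weight.detach().clone()

optimizer.zero_grad()
loss = F.mse_loss(net(x), y)
loss.backward()
optimizer.step()

# prints False: the weight changed, so the parameters are being updated
print(torch.allclose(weight_before, net.weight))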
@InnovArul
InnovArul / mars_split_bug.py
Last active January 8, 2019 11:07
Code to find Mars person reid data gallery-query split bug
# taken and adapted from: https://github.com/jiyanggao/Video-Person-ReID/issues/6
import numpy as np
from scipy.io import loadmat
q = loadmat('./mars/info/query_IDX.mat')['query_IDX'][0]
t = loadmat('./mars/info/tracks_test_info.mat')['track_test_info']
query_inds = q - 1 # convert MATLAB's 1-based indices to 0-based indices for the query tracklets
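The preview ends here. A hypothetical continuation of the check (assuming the usual MARS layout, where the last two columns of track_test_info hold person ID and camera ID) would verify that every query tracklet still has a same-identity, cross-camera match in the gallery:

# gallery = all test tracklets that are not query tracklets
gallery_inds = np.setdiff1d(np.arange(t.shape[0]), query_inds)
pids, cams = t[:, 2], t[:, 3]
for qi in query_inds:
    same_id = pids[gallery_inds] == pids[qi]
    other_cam = cams[gallery_inds] != cams[qi]
    if not (same_id & other_cam).any():
        # such a query is unanswerable, which is the split bug
        print('query tracklet %d (pid %d) has no valid gallery match' % (qi, pids[qi]))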
@InnovArul
InnovArul / cuda-extension-pytorch1.0-compile.sh
Created October 9, 2018 09:17
Compilation log for the PyTorch 1.0 LLTM CUDA extension from the C++ extension tutorial
running install
running bdist_egg
running egg_info
creating lltm_cuda.egg-info
writing lltm_cuda.egg-info/PKG-INFO
writing dependency_links to lltm_cuda.egg-info/dependency_links.txt
writing top-level names to lltm_cuda.egg-info/top_level.txt
writing manifest file 'lltm_cuda.egg-info/SOURCES.txt'
reading manifest file 'lltm_cuda.egg-info/SOURCES.txt'
writing manifest file 'lltm_cuda.egg-info/SOURCES.txt'
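For reference, a log like this is produced by running python setup.py install on a build script along these lines (a sketch following the tutorial; the source file names are assumptions):

from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension

setup(
    name='lltm_cuda',
    ext_modules=[
        # compiles the C++ binding and the CUDA kernel into one extension
        CUDAExtension('lltm_cuda', ['lltm_cuda.cpp', 'lltm_cuda_kernel.cu']),
    ],
    cmdclass={'build_ext': BuildExtension})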
@InnovArul
InnovArul / pytorch_shared_module_weights.py
Created September 30, 2018 00:10
To validate that a shared module's weights remain shared after saving and loading state_dict()
import torch
import torch.nn as nn
class SubModule(nn.Module):
    def __init__(self, embedding):
        super(SubModule, self).__init__()
        self.embedding = embedding  # module shared with whoever passed it in
        self.fc = nn.Linear(200, 200)
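The preview cuts off after SubModule. A hypothetical completion of the validation: hand one embedding to two submodules, round-trip the state dict, and compare storage pointers.

shared_embedding = nn.Embedding(10, 200)

class Parent(nn.Module):
    def __init__(self):
        super().__init__()
        self.branch_a = SubModule(shared_embedding)
        self.branch_b = SubModule(shared_embedding)

parent = Parent()
parent.load_state_dict(parent.state_dict())  # save/load round trip

# True before and after loading: both branches point at the same storage
print(parent.branch_a.embedding.weight.data_ptr()
      == parent.branch_b.embedding.weight.data_ptr())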
@InnovArul
InnovArul / new_machine.sh
Created August 10, 2018 08:53
installations on new machine
sudo apt-get install zsh
sudo apt-get install git
sudo add-apt-repository ppa:webupd8team/sublime-text-3
sudo apt-get update
sudo apt-get install sublime-text-installer
# installed manually: dropbox, teamviewer, google-chrome
@InnovArul
InnovArul / tied_linear.py
Last active January 6, 2025 23:27
tied linear layer experiment
import torch, torch.nn as nn, torch.nn.functional as F
import numpy as np
import torch.optim as optim
# tied autoencoder using off-the-shelf nn modules
class TiedAutoEncoderOffTheShelf(nn.Module):
    def __init__(self, inp, out, weight):
        super().__init__()
        self.encoder = nn.Linear(inp, out, bias=False)
        self.decoder = nn.Linear(out, inp, bias=False)
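        # Hypothetical completion (the preview cuts off here): register the
        # given weight on the encoder, then reuse it transposed when decoding,
        # so a single Parameter receives gradients from both directions.
        self.encoder.weight = nn.Parameter(weight)

    def forward(self, x):
        encoded = self.encoder(x)
        # bypass decoder.weight and apply the tied, transposed encoder weight
        reconstructed = F.linear(encoded, self.encoder.weight.t())
        return encoded, reconstructed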
import torch
import torch.nn as nn
import torch.nn.functional as F
class model(nn.Module):
    def __init__(self):
        super(model, self).__init__()
        self.conv1 = nn.Conv1d(9, 18, kernel_size=3)   # 9 input channels, 18 output channels
        self.conv2 = nn.Conv1d(18, 36, kernel_size=3)  # 18 input channels from the previous conv layer, 36 output channels
        self.conv2_drop = nn.Dropout2d()               # dropout on the conv feature maps
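    # Hypothetical continuation (the preview truncates here): a typical
    # forward pass for this stack, pooling after each conv block.
    def forward(self, x):
        x = F.relu(F.max_pool1d(self.conv1(x), 2))
        x = F.relu(F.max_pool1d(self.conv2_drop(self.conv2(x)), 2))
        return x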
running install
running build_deps
+ USE_CUDA=0
+ USE_ROCM=0
+ USE_NNPACK=0
+ USE_MKLDNN=0
+ USE_GLOO_IBVERBS=0
+ USE_DISTRIBUTED_MW=0
+ FULL_CAFFE2=0
+ [[ 10 -gt 0 ]]
# @Author: arul
# @Date: 2017-11-13T21:45:01+05:30
# @Last modified by: arul
# @Last modified time: 2017-11-13T22:56:47+05:30
# Your init script
#
# Atom will evaluate this file each time a new window is opened. It is run
# after packages are loaded/activated and after the previous editor state
# has been restored.
if (opt.traintype == 'finetuning' and epoch == 1) then
    -- find the number of elements for which the learning rates should be reduced
    totalParams = 0;
    isAfterCrossNeighbor = 0;
    dontFreeze = 0;
    for index, node in ipairs(model.modules) do
        -- currParams = node:getParameters()
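Not part of the gist: the PyTorch analogue of this Lua loop is optimizer parameter groups, which let earlier pretrained layers train at a reduced learning rate during finetuning (backbone and head below are placeholder module names):

import torch.optim as optim

optimizer = optim.SGD([
    {'params': model.backbone.parameters(), 'lr': 1e-4},  # pretrained layers: reduced LR
    {'params': model.head.parameters(), 'lr': 1e-2},      # new layers: full LR
], momentum=0.9)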