Arulkumar InnovArul
💭 wandering around in the AI space
from torch.utils.data import DataLoader, Dataset
import torch, torch.nn as nn
import numpy as np

class DS(Dataset):
    # toy dataset over the integers 0..14999
    def __init__(self):
        super().__init__()
        self.x = list(np.arange(15000))

    def __len__(self):
        return len(self.x)

    def __getitem__(self, idx):
        return self.x[idx]
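A minimal usage check (the batch size is arbitrary, and this assumes the __len__/__getitem__ completion above):

loader = DataLoader(DS(), batch_size=32, shuffle=True)
batch = next(iter(loader))
print(batch.shape)  # torch.Size([32])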
import torch, torch.nn as nn, torch.nn.functional as F

def perform_non_parallelconv(input, convs):
    # apply the i-th conv to the i-th group slice of the input,
    # then concatenate the results along the channel dimension
    outs = []
    for i in range(len(convs)):
        o = convs[i](input[:, i])
        outs.append(o)
    outs = torch.cat(outs, dim=1)
    return outs
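A plausible way to call this, assuming the input carries a leading group dimension and convs holds one Conv2d per group (the shapes here are illustrative, not from the gist):

convs = nn.ModuleList(nn.Conv2d(4, 6, kernel_size=3, padding=1) for _ in range(3))
x = torch.randn(2, 3, 4, 8, 8)            # (batch, groups, channels, H, W)
out = perform_non_parallelconv(x, convs)
print(out.shape)                          # torch.Size([2, 18, 8, 8])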
import torch

# demonstrates autograd anomaly detection: the in-place update of z
# modifies a tensor that the multiply needs for backward, so backward()
# raises a RuntimeError pointing at the offending operation
with torch.autograd.set_detect_anomaly(True):
    x = torch.randn(5, 6, requires_grad=True)
    z = x.sum(-1)
    z += z * z
    z.sum().backward()
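For contrast, a sketch of the out-of-place version (my addition, not part of the gist), which builds a fresh tensor instead of mutating z and therefore backpropagates cleanly:

x = torch.randn(5, 6, requires_grad=True)
z = x.sum(-1)
z = z + z * z   # out-of-place: the tensor saved for backward is untouched
z.sum().backward()
print(x.grad.shape)  # torch.Size([5, 6])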
import torch
import torch.nn.functional as F

is_cuda = True
# 19 predicted classes (dim 1), but targets are drawn from [0, 22):
# any label above 18 is out of bounds for cross entropy
input = torch.randn(1, 19, 32, 64, requires_grad=True)
target = torch.randint(22, size=(1, 32, 64))
print(input.dim())
print("number of out-of-bound targets:", (target > 18).sum())
@InnovArul
InnovArul / video_lectures.sh
Created February 9, 2021 01:43
to preprocess dvp video lectures
mv Day.. day_ # move Day folder to day_ folder
# move MTS files to root dir
mv ./01/2019/* .
# to concatenate the MTS files into a single MP4
ffmpeg -i "concat:$(echo *.MTS | tr ' ' '|')" -strict -2 concat_out.mp4
@InnovArul
InnovArul / torch_extn.sh
Last active October 25, 2021 21:27
to build torch extension for all cuda architectures
TORCH_CUDA_ARCH_LIST="5.2 6.0 6.1 7.0 7.5 8.0 8.6+PTX" python setup.py build
python setup.py install
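After installing, one way to sanity-check which CUDA architectures the local torch binary itself supports (this inspects torch, not the built extension; my addition):

import torch
print(torch.cuda.get_arch_list())   # e.g. ['sm_52', ..., 'sm_86', 'compute_86']
print(torch.version.cuda)           # CUDA toolkit version torch was built with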
:: launch Cmder in the folder given by %ActivDir%
D:\cmder_mini\Cmder.exe "%ActivDir%"
@InnovArul
InnovArul / reset_params.py
Created November 18, 2021 20:03
deepcopy and reset params
import torchvision, copy
import torch, torch.nn as nn

def reset_all_weights(model: nn.Module) -> None:
    """
    refs:
    - https://discuss.pytorch.org/t/how-to-re-set-alll-parameters-in-a-network/20819/6
    - https://stackoverflow.com/questions/63627997/reset-parameters-of-a-neural-network-in-pytorch
    - https://pytorch.org/docs/stable/generated/torch.nn.Module.html
    """
    def weight_reset(m: nn.Module):
        # call reset_parameters() on every submodule that defines it
        reset_parameters = getattr(m, "reset_parameters", None)
        if callable(reset_parameters):
            m.reset_parameters()

    model.apply(weight_reset)
import torch, torch.nn as nn

class LowlevelModule(nn.Module):
    def __init__(self, custom_val):
        super().__init__()
        self.custom_val = custom_val

    def print_custom_val(self):
        print(self.custom_val.item())
@InnovArul
InnovArul / linear_partial_freeze_no_weightdecay.py
Last active November 20, 2021 21:16
to freeze weights and avoid weight decay of frozen weights
import torch, torch.nn as nn
import torch.optim as optim, torch.nn.functional as F

class CustomLinearNoWeightDecay(nn.Module):
    def __init__(self, mask):
        super().__init__()
        # the mask is a buffer, not a Parameter: it gets no gradient
        # and is invisible to the optimizer's weight decay
        self.register_buffer("mask", mask)
        out_channels, in_channels = mask.shape
        self.weight = nn.Parameter(torch.randn(out_channels, in_channels))
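The listing cuts off after __init__; below is a hypothetical forward, assuming the mask is 1 for trainable entries and 0 for frozen ones (my sketch, not the author's code):

    def forward(self, x):
        # frozen (mask == 0) weights are pinned to zero and get no gradient
        return F.linear(x, self.weight * self.mask)

Multiplying by the mask zeroes both the value and the gradient of the frozen entries; note that optimizer-side weight decay still acts on the raw parameter, so the full gist presumably also excludes those entries via parameter groups or a gradient hook.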
@InnovArul
InnovArul / create_user.sh
Created February 17, 2022 05:29
create users - ubuntu
command:
--------
sudo useradd -m -d /home/<user> -s /bin/bash -c "<rollnumber>" -U <user>

password:
---------
sudo passwd <user>

Add user to sudo:
-----------------
sudo usermod -aG sudo <user>