Joakim Rishaug NegatioN

public final class BraveRxJavaSchedulersHook extends RxJavaSchedulersHook {
    @Override
    public Action0 onSchedule(Action0 action) {
        // Capture the current server span on the scheduling thread and
        // re-bind it on the worker thread before the action runs.
        ServerSpanThreadBinder binder = ZipkinHolder.getInstance().serverSpanThreadBinder();
        ServerSpan span = binder.getCurrentServerSpan();
        return () -> {
            binder.setCurrentSpan(span);
            action.call();
        };
    }
}
@NegatioN
NegatioN / argparse2named.py
Last active September 11, 2018 15:34
ArgParse to namedtuple
from collections import namedtuple

def argparse_to_namedtuple(argparse_parser):
    # collect the default value of every argument, keyed by its dest name
    args = {x.dest: argparse_parser.get_default(x.dest) for x in argparse_parser._actions}
    args.pop('help', None)
    ParserTuple = namedtuple('ParserTuple', ' '.join(args.keys()))
    return ParserTuple(**args)
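
# Usage sketch (the arguments below are illustrative, not from the gist):
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--lr', type=float, default=0.01)
parser.add_argument('--epochs', type=int, default=10)

defaults = argparse_to_namedtuple(parser)
print(defaults.lr, defaults.epochs)  # 0.01 10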
@NegatioN
NegatioN / onehot.py
Last active July 7, 2023 17:07
PyTorch multi-dimensional one-hot encoding
import torch

def _to_one_hot(y, num_classes):
    # scatter a 1 into a new trailing dimension of size num_classes
    scatter_dim = len(y.size())
    y_tensor = y.view(*y.size(), -1)
    zeros = torch.zeros(*y.size(), num_classes, dtype=y.dtype)
    return zeros.scatter(scatter_dim, y_tensor, 1)

print(_to_one_hot(torch.as_tensor([2, 4, 7]), num_classes=10))
print(_to_one_hot(torch.as_tensor([[1, 5, 6], [2, 4, 7]]), num_classes=10))
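
# For comparison (not part of the gist): recent PyTorch releases ship
# torch.nn.functional.one_hot, which gives the same encoding for integer
# label tensors of any shape.
import torch.nn.functional as F
print(F.one_hot(torch.as_tensor([2, 4, 7]), num_classes=10))
print(F.one_hot(torch.as_tensor([[1, 5, 6], [2, 4, 7]]), num_classes=10))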
@NegatioN
NegatioN / LSEP.py
Created June 20, 2018 13:56
PyTorch LSEP loss function implementation
import torch
from torch.autograd import Function

def _to_one_hot(y, n_dims, dtype=torch.cuda.FloatTensor):
    # CUDA-only one-hot helper: scatter a 1 into a new trailing dimension of size n_dims
    scatter_dim = len(y.size())
    y_tensor = y.type(torch.cuda.LongTensor).view(*y.size(), -1)
    zeros = torch.zeros(*y.size(), n_dims).type(dtype)
    return zeros.scatter(scatter_dim, y_tensor, 1)

class LSEP2(Function):
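
# The preview of the LSEP2 autograd Function is truncated above. For
# reference only, a naive sketch (an assumption, not the gist's code) of the
# LSEP loss as commonly defined: log(1 + sum over positive p and negative n
# of exp(s_n - s_p)), averaged over the batch.
def lsep_naive(scores, targets):
    # scores:  [batch, num_classes] raw model outputs
    # targets: [batch, num_classes] multi-hot labels in {0, 1}
    losses = []
    for s, t in zip(scores, targets):
        pos = s[t == 1]                                # scores of positive labels
        neg = s[t == 0]                                # scores of negative labels
        diff = neg.unsqueeze(0) - pos.unsqueeze(1)     # all pairwise s_n - s_p
        losses.append(torch.log1p(diff.exp().sum()))
    return torch.stack(losses).mean()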
@NegatioN
NegatioN / warp.py
Last active July 11, 2018 09:09
warp loss
def num_tries_gt_zero(scores, batch_size, max_trials, max_num, device):
    '''
    returns: [1 x batch_size] the lowest index per row where scores were first greater than 0, plus 1
    '''
    tmp = scores.gt(0).nonzero().t()
    # We offset these values by 1 so we can look for unset values (zeros) later
    values = tmp[1] + 1
    # TODO just allocate a normal zero-tensor and fill it?
    # Sparse tensors can't be moved with .to() or .cuda() if you want to send in cuda variables first
    if device.type == 'cuda':
@NegatioN
NegatioN / circlr.py
Last active July 31, 2018 07:51
Circular LR
# Circular LR as implemented in fast.ai, but without depending on all of its internals
class CircularLR:
    def __init__(self, optimizer, nb, div=10, pct=10, momentums=None):
        self.nb, self.div, self.pct = nb, div, pct
        self.cycle_nb = int(nb * (1 - pct / 100) / 2)
        self.opt = optimizer
        self.init_lr = self.opt.param_groups[0]['lr']
        if momentums is not None:
            self.moms = momentums
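
# Rough sketch (an assumption, not the truncated class above) of the kind of
# one-cycle schedule these parameters describe: ramp up from init_lr/div to
# init_lr over cycle_nb steps, ramp back down over the next cycle_nb steps,
# then decay towards ~0 during the final pct% of the nb total steps.
def one_cycle_lr(step, nb, init_lr, div=10, pct=10):
    cycle_nb = int(nb * (1 - pct / 100) / 2)
    low = init_lr / div
    if step <= cycle_nb:                                        # warm-up ramp
        return low + (init_lr - low) * step / cycle_nb
    if step <= 2 * cycle_nb:                                    # cool-down ramp
        return init_lr - (init_lr - low) * (step - cycle_nb) / cycle_nb
    frac = (step - 2 * cycle_nb) / max(nb - 2 * cycle_nb, 1)    # final decay phase
    return low * (1 - frac) + (low / 100) * frac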
@NegatioN
NegatioN / find_lr.py
Last active September 14, 2018 11:25
find learning rate from fast.ai
import matplotlib.pyplot as plt
%matplotlib inline

def find_lr(net, criterion, optimizer, trn_loader, init_value=1e-8, final_value=10., beta=0.98):
    num = len(trn_loader) - 1
    mult = (final_value / init_value) ** (1 / num)
    lr = init_value
    optimizer.param_groups[0]['lr'] = lr
    avg_loss = 0.
    best_loss = 0.
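    # The preview is truncated here. A sketch (an assumption, not the gist's
    # own continuation) of how this LR-finder loop typically proceeds:
    losses, lrs = [], []
    batch_num = 0
    for inputs, labels in trn_loader:
        batch_num += 1
        optimizer.zero_grad()
        loss = criterion(net(inputs), labels)
        # exponentially smoothed loss with bias correction
        avg_loss = beta * avg_loss + (1 - beta) * loss.item()
        smoothed_loss = avg_loss / (1 - beta ** batch_num)
        # stop once the loss clearly diverges
        if batch_num > 1 and smoothed_loss > 4 * best_loss:
            break
        if smoothed_loss < best_loss or batch_num == 1:
            best_loss = smoothed_loss
        losses.append(smoothed_loss)
        lrs.append(lr)
        loss.backward()
        optimizer.step()
        # geometrically increase the LR for the next batch
        lr *= mult
        optimizer.param_groups[0]['lr'] = lr
    plt.plot(lrs, losses)
    plt.xscale('log')
    return lrs, losses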
@NegatioN
NegatioN / no_mutate.py
Created February 14, 2020 04:13
Dicts unpacked with ** are not mutated by the callee.
def f(x, y, z):
    # rebinding the parameter names only changes local variables
    x = "hello"
    y, z = "swag", "master"

d = {'x': "bleh", 'y': "bleh", 'z': "bleh"}
f(**d)
print(d)
# {'x': 'bleh', 'y': 'bleh', 'z': 'bleh'}
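
# For contrast (an added illustration, not part of the gist): passing the
# dict object itself and assigning to its keys does mutate it.
def g(d):
    d['x'] = "hello"

d2 = {'x': "bleh"}
g(d2)
print(d2)
# {'x': 'hello'}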
@NegatioN
NegatioN / simple_batch_loader.py
Created March 10, 2020 11:33
A simple batch data loader able to act on CUDA tensors
from torch.utils.data import Dataset, DataLoader, Sampler
import numpy as np

class BatchSampler(Sampler):
    def __init__(self, num_samples, batch_size, shuffle=True):
        '''
        Samples a 1d sequence as batches of indices
        :param num_samples: total number of datapoints (1d data sequence) to be sampled from.
        '''
        self.num_samples = num_samples
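        # The preview is truncated here. A sketch (an assumption, not the
        # gist's own continuation) of the usual remainder of such a sampler,
        # yielding whole batches of indices so a Dataset can slice tensors
        # (including CUDA tensors) directly.
        self.batch_size = batch_size
        self.shuffle = shuffle

    def __iter__(self):
        if self.shuffle:
            idx = np.random.permutation(self.num_samples)
        else:
            idx = np.arange(self.num_samples)
        for start in range(0, self.num_samples, self.batch_size):
            yield idx[start:start + self.batch_size].tolist()

    def __len__(self):
        return (self.num_samples + self.batch_size - 1) // self.batch_size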
@NegatioN
NegatioN / n_hot.py
Last active April 16, 2020 09:51
PyTorch N-hot encoding function
import torch

def n_hot(y, num_classes, scatter_dim):
    # we assume the masking value is always -1
    # add an extra class and shift the labels so that -1 maps to class 0
    nc = num_classes + 1
    y += 1
    y_tensor = y.view(*y.size()[:scatter_dim], -1)
    zeros = torch.zeros(*y.size()[:scatter_dim], nc, dtype=y.dtype, device=y.device)
    res = zeros.scatter(scatter_dim, y_tensor, 1)
    # drop the extra "masked" class (index 0) again
    return res.index_select(scatter_dim, torch.arange(1, nc).long())
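
# Usage sketch (the labels below are illustrative, not from the gist): each
# row holds a variable number of class labels, padded with -1.
y = torch.tensor([[0, 2, -1],
                  [1, 3, 4]])
print(n_hot(y, num_classes=5, scatter_dim=1))
# tensor([[1, 0, 1, 0, 0],
#         [0, 1, 0, 1, 1]])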