import com.github.kristofa.brave.ServerSpan;
import com.github.kristofa.brave.ServerSpanThreadBinder;
import rx.functions.Action0;
import rx.plugins.RxJavaSchedulersHook;

public final class BraveRxJavaSchedulersHook extends RxJavaSchedulersHook {
  @Override
  public Action0 onSchedule(Action0 action) {
    // Capture the current server span on the scheduling thread
    // (ZipkinHolder is application-specific).
    ServerSpanThreadBinder binder = ZipkinHolder.getInstance().serverSpanThreadBinder();
    ServerSpan span = binder.getCurrentServerSpan();
    return () -> {
      // Re-attach the span on the worker thread before running the action,
      // so the tracing context survives the scheduler hop.
      binder.setCurrentSpan(span);
      action.call();
    };
  }
}
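To activate the hook, register it once at startup via RxJava 1.x's plugin registry, e.g. RxJavaPlugins.getInstance().registerSchedulersHook(new BraveRxJavaSchedulersHook()).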
from collections import namedtuple

def argparse_to_namedtuple(argparse_parser):
    # Collect the default value of every registered argument.
    args = {x.dest: argparse_parser.get_default(x.dest) for x in argparse_parser._actions}
    args.pop('help', None)  # drop the implicit help action
    ParserTuple = namedtuple('ParserTuple', ' '.join(args.keys()))
    return ParserTuple(**args)
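For example (the parser below is purely illustrative):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--lr', type=float, default=0.01)
parser.add_argument('--epochs', type=int, default=10)

defaults = argparse_to_namedtuple(parser)
print(defaults)  # ParserTuple(lr=0.01, epochs=10)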
import torch

def _to_one_hot(y, num_classes):
    # Scatter 1s along a new trailing dimension of size num_classes.
    scatter_dim = len(y.size())
    y_tensor = y.view(*y.size(), -1)
    zeros = torch.zeros(*y.size(), num_classes, dtype=y.dtype)
    return zeros.scatter(scatter_dim, y_tensor, 1)

print(_to_one_hot(torch.as_tensor([2, 4, 7]), num_classes=10))
print(_to_one_hot(torch.as_tensor([[1, 5, 6], [2, 4, 7]]), num_classes=10))
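PyTorch 1.1+ ships an equivalent built-in, which covers the same cases:

import torch
import torch.nn.functional as F

# Returns an int64 one-hot encoding along a new trailing dimension.
print(F.one_hot(torch.as_tensor([2, 4, 7]), num_classes=10))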
import torch
from torch.autograd import Function

def _to_one_hot(y, n_dims, dtype=torch.cuda.FloatTensor):
    # CUDA variant: uses the legacy tensor-type classes to keep everything on GPU.
    scatter_dim = len(y.size())
    y_tensor = y.type(torch.cuda.LongTensor).view(*y.size(), -1)
    zeros = torch.zeros(*y.size(), n_dims).type(dtype)
    return zeros.scatter(scatter_dim, y_tensor, 1)

class LSEP2(Function):
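The dtype argument above defaults to the legacy torch.cuda.FloatTensor type class, which newer PyTorch versions deprecate. A device-agnostic sketch of the same helper (a modernization for illustration, not the gist's continuation):

import torch

def to_one_hot(y, n_dims, dtype=torch.float):
    # Same scatter trick, but dtype and device follow the input tensor.
    scatter_dim = y.dim()
    y_tensor = y.long().view(*y.size(), -1)
    zeros = torch.zeros(*y.size(), n_dims, dtype=dtype, device=y.device)
    return zeros.scatter(scatter_dim, y_tensor, 1)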
def num_tries_gt_zero(scores, batch_size, max_trials, max_num, device):
    '''
    Returns a [1 x batch_size] tensor holding, per row, the lowest index at
    which scores first exceeded 0, plus 1.
    '''
    tmp = scores.gt(0).nonzero().t()
    # We offset these values by 1 so that unset entries (zeros) can be detected later.
    values = tmp[1] + 1
    # TODO just allocate a normal zero-tensor and fill it?
    # Sparse tensors can't be moved with .to() or .cuda() if you want to send in CUDA variables first.
    if device.type == 'cuda':
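The preview cuts off inside the device branch, but the TODO above hints at a dense alternative. A standalone sketch of that dense approach, based only on my reading of the docstring's contract (not the gist's actual continuation):

import torch

def first_positive_index_plus_one(scores):
    # Per row: lowest column index where scores > 0, plus 1; 0 if no positive entry.
    mask = scores.gt(0)
    n_cols = scores.size(1)
    idx = torch.arange(n_cols, device=scores.device).expand_as(scores)
    # Replace non-positive positions with a sentinel larger than any real index.
    idx = torch.where(mask, idx, torch.full_like(idx, n_cols))
    first = idx.min(dim=1).values + 1
    first[first > n_cols] = 0  # rows where nothing was ever > 0
    return first.unsqueeze(0)  # shape [1 x batch_size]

s = torch.tensor([[-1., 2., 3.], [-1., -1., -1.], [0.5, -2., 1.]])
print(first_positive_index_plus_one(s))  # tensor([[2, 0, 1]])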
# Circular LR as implemented in fast.ai, but without depending on its internals.
class CircularLR:
    def __init__(self, optimizer, nb, div=10, pct=10, momentums=None):
        self.nb, self.div, self.pct = nb, div, pct
        # Iterations per half-cycle; the final pct% of iterations are
        # reserved for the annealing tail.
        self.cycle_nb = int(nb * (1 - pct / 100) / 2)
        self.opt = optimizer
        self.init_lr = self.opt.param_groups[0]['lr']
        if momentums is not None:
            self.moms = momentums
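As a quick check of the arithmetic: with nb=1000 and the default pct=10, cycle_nb = int(1000 * (1 - 0.1) / 2) = 450, i.e. 450 iterations ramping up, 450 ramping down, and the remaining 100 (10%) left for the final anneal.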
import matplotlib.pyplot as plt
%matplotlib inline

def find_lr(net, criterion, optimizer, trn_loader, init_value=1e-8, final_value=10., beta=0.98):
    # Sweep the learning rate geometrically from init_value to final_value
    # over one pass of the loader, tracking the smoothed loss.
    num = len(trn_loader) - 1
    mult = (final_value / init_value) ** (1 / num)
    lr = init_value
    optimizer.param_groups[0]['lr'] = lr
    avg_loss = 0.
    best_loss = 0.
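The visible setup implies that the loop (not shown in this preview) multiplies lr by mult after each batch, so over num steps the rate sweeps geometrically from init_value to final_value. A standalone sanity check of that schedule:

init_value, final_value, num = 1e-8, 10., 100
mult = (final_value / init_value) ** (1 / num)
lrs = [init_value * mult ** i for i in range(num + 1)]
print(lrs[0], lrs[-1])  # endpoints: 1e-08 and ~10.0 (up to float error)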
def f(x, y, z):
    # Rebinding the parameters only changes the local names inside f;
    # the dict the values came from is untouched.
    x = "hello"
    y, z = "swag", "master"

d = {'x': "bleh", 'y': "bleh", 'z': "bleh"}
f(**d)
print(d)
# {'x': 'bleh', 'y': 'bleh', 'z': 'bleh'}
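The contrast case: if the function mutates the objects instead of rebinding the names, the change is visible through the dict (a minimal sketch):

def g(x, y, z):
    x.append("hello")  # mutates the list object shared with the dict

d = {'x': [], 'y': [], 'z': []}
g(**d)
print(d)
# {'x': ['hello'], 'y': [], 'z': []}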
from torch.utils.data import Dataset, DataLoader, Sampler
import numpy as np

class BatchSampler(Sampler):
    def __init__(self, num_samples, batch_size, shuffle=True):
        '''
        Samples a 1d sequence as batches of indices.
        :param num_samples: total number of datapoints (1d data sequence) to be sampled from.
        :param batch_size: number of indices per yielded batch.
        :param shuffle: whether to permute the indices each epoch.
        '''
        self.num_samples = num_samples
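The preview stops after the first assignment. A minimal self-contained sampler in the same spirit, assuming only the constructor's visible signature (a sketch, not the gist's actual body):

import torch
from torch.utils.data import Sampler

class SimpleBatchSampler(Sampler):
    def __init__(self, num_samples, batch_size, shuffle=True):
        self.num_samples = num_samples
        self.batch_size = batch_size
        self.shuffle = shuffle

    def __iter__(self):
        # Yield lists of indices, batch by batch.
        order = torch.randperm(self.num_samples) if self.shuffle \
            else torch.arange(self.num_samples)
        for i in range(0, self.num_samples, self.batch_size):
            yield order[i:i + self.batch_size].tolist()

    def __len__(self):
        return (self.num_samples + self.batch_size - 1) // self.batch_size

# Usage: DataLoader(dataset, batch_sampler=SimpleBatchSampler(len(dataset), 32))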
import torch

def n_hot(y, num_classes, scatter_dim):
    # We assume the masking value is always -1.
    # Add an extra class and shift y, so masked entries land in class 0.
    nc = num_classes + 1
    y = y + 1  # out-of-place, so the caller's tensor is not mutated
    y_tensor = y.view(*y.size()[:scatter_dim], -1)
    zeros = torch.zeros(*y.size()[:scatter_dim], nc, dtype=y.dtype, device=y.device)
    res = zeros.scatter(scatter_dim, y_tensor, 1)
    # Drop helper class 0, which absorbed the -1 mask entries.
    return res.index_select(scatter_dim, torch.arange(1, nc, device=y.device))
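For example, with -1 used as padding, the masked slots simply vanish from the encoding:

y = torch.tensor([[1, 5, 6], [2, -1, -1]])
print(n_hot(y, num_classes=10, scatter_dim=1))
# tensor([[0, 1, 0, 0, 0, 1, 1, 0, 0, 0],
#         [0, 0, 1, 0, 0, 0, 0, 0, 0, 0]])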