albanD
🌄 Recharging until end of Nov
require 'nn'
require 'stn'
------
-- Prepare your localization network
local localization_network = torch.load('your_locnet.t7')
------
-- Prepare both branches of the spatial transformer
local ct = nn.ConcatTable()
-- Compute the number of output elements of a stack of conv layers
-- by forwarding a dummy input through them.
function networks.convs_noutput(convs, input_size)
  input_size = input_size or networks.base_input_size
  -- Get the number of input channels, whether the conv is multiscale or not
  local nbr_input_channels = convs[1]:get(1).nInputPlane or
      convs[1]:get(1):get(1).nInputPlane
  local output = torch.Tensor(1, nbr_input_channels, input_size, input_size)
  for _, conv in ipairs(convs) do
    output = conv:forward(output)
  end
  return output:nElement(), output:size(3)
end
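The same dummy-forward trick is how one usually sizes the first linear layer after a conv stack in PyTorch too; a minimal Python sketch (the layer sizes and the 3×32×32 input are assumptions for illustration, not part of the gist):

import torch
from torch import nn

def convs_noutput(convs, input_size=32, in_channels=3):
    # Forward a dummy batch of one image through the conv stack and
    # read the flattened feature count and spatial size off the result.
    with torch.no_grad():
        out = convs(torch.zeros(1, in_channels, input_size, input_size))
    return out.numel(), out.size(-1)

convs = nn.Sequential(nn.Conv2d(3, 16, 3), nn.ReLU(), nn.MaxPool2d(2))
n_features, spatial = convs_noutput(convs)
fc = nn.Linear(n_features, 10)  # size the classifier from the measured count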
@albanD
albanD / test.lua
Last active September 30, 2015 14:46
require 'nn'
require 'stn' -- github.com/qassemoquab/stnbhwd by Maxime Oquab
local localization_network = torch.load('your_locnet.t7')
local ct = nn.ConcatTable()
-- First branch: permute the input from BDHW to the BHWD layout stnbhwd expects
local branch1 = nn.Transpose({3,4},{2,4})
local branch2 = nn.Sequential()
local threads = require "threads"
threads.Threads.serialization('threads.sharedserialize')
local n_task = 3
local pools = {}
for task = 1, n_task do
  pools[task] = threads.Threads(5,
    function()
      -- Needed only for serialized elements
@albanD
albanD / hessian.py
Created September 25, 2019 21:03
Compute full Hessian of a network
import torch
from torch import nn
from torchviz import make_dot
from torch.autograd.gradcheck import gradcheck

# Double precision keeps the finite-difference gradcheck accurate
torch.set_default_tensor_type(torch.DoubleTensor)

my_mod = nn.Sequential(
    nn.Linear(2, 2, bias=False), nn.Sigmoid(),
    nn.Linear(2, 2, bias=False), nn.Sigmoid(),
    nn.Linear(2, 1, bias=False),
)
params = list(my_mod.parameters())
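The preview stops before the Hessian computation itself; a minimal sketch of the usual double-backward recipe (the loss and the flattening scheme are assumptions, not necessarily the gist's exact code):

def full_hessian(loss, params):
    # First-order grads, with create_graph=True so we can differentiate again
    grads = torch.autograd.grad(loss, params, create_graph=True)
    flat_grad = torch.cat([g.reshape(-1) for g in grads])
    n = flat_grad.numel()
    hessian = torch.zeros(n, n)
    # One extra backward pass per entry of the flattened gradient
    for i in range(n):
        row = torch.autograd.grad(flat_grad[i], params, retain_graph=True)
        hessian[i] = torch.cat([r.reshape(-1) for r in row])
    return hessian

inp = torch.rand(1, 2)
H = full_hessian(my_mod(inp).sum(), params)  # (n, n), symmetric up to numerics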
@albanD
albanD / linear_jit_debug.md
Last active October 16, 2019 19:27
Autodiff linear debugging

Debugging code

std::cout << "Forwarding into jit module" << std::endl;
std::cout << "Forward code:" << std::endl;
std::cout << *grad.f.get() << std::endl;
std::cout << "Backward code:" << std::endl;
std::cout << *grad.df.get() << std::endl;
std::cout << "End print !" << std::endl;
import torch
from torch import nn
from torch.nn import functional as F

class EasyDataParallel(nn.Module):
    def __init__(self, gpus):
        super().__init__()
        # Handle cpu / 1 gpu case better
        assert isinstance(gpus, list)
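The preview cuts off right after the assert; one plausible completion, sketched under the assumption that the class wraps a module and falls back to plain execution on CPU (the module argument and the dispatch below are guesses, not the gist's code):

class EasyDataParallelSketch(nn.Module):
    def __init__(self, module, gpus):
        super().__init__()
        assert isinstance(gpus, list)
        if len(gpus) == 0:
            self.module = module                # CPU: run the module as-is
        elif len(gpus) == 1:
            self.module = module.cuda(gpus[0])  # one GPU: just move it
        else:
            # Several GPUs: replicate with the stock DataParallel wrapper
            self.module = nn.DataParallel(module.cuda(gpus[0]), device_ids=gpus)

    def forward(self, *args, **kwargs):
        return self.module(*args, **kwargs)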
from patch_convolution import *
import torch
import torch.nn as nn
import time

# ---------------
# Parameters
# ---------------
# Number of profiling iterations to run
itt = 30
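The preview ends at the parameter block; a typical CUDA timing loop for this kind of comparison might look as follows (the shapes and the plain nn.Conv2d baseline are assumptions; patch_convolution is the gist's own module):

x = torch.randn(8, 64, 56, 56, device="cuda")
conv = nn.Conv2d(64, 64, kernel_size=3, padding=1).cuda()

# Warm up so cuDNN autotuning and lazy init don't pollute the timing
for _ in range(5):
    conv(x)
torch.cuda.synchronize()

start = time.time()
for _ in range(itt):
    conv(x)
torch.cuda.synchronize()  # wait for queued kernels before reading the clock
print("avg forward: {:.3f} ms".format((time.time() - start) / itt * 1e3))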
@albanD
albanD / common_dtype.md
Last active May 18, 2020 19:21
Python function common dtype

Ops to test on the Python side

If nothing is specified, all argument combinations should be considered (a minimal promotion-check sketch follows the list).

CPU and GPU

  • copy_ no_sparse && no_quantize && self!=source && not_copy_transpose
  • gather
  • gather(out=)
  • scatter_(Tensor)
  • scatter(Tensor)
  • scatter_(value)
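A minimal sketch of checking the common dtype these ops should promote to, via torch.result_type (scatter_(value) stands in for the list above; the concrete dtypes are just examples):

import torch

a = torch.zeros(4, dtype=torch.float32)
b = torch.randn(4, dtype=torch.float64)

# result_type implements the promotion table for a concrete pair of arguments
print(torch.result_type(a, b))                          # torch.float64
print(torch.promote_types(torch.int64, torch.float32))  # torch.float32

# scatter_(value) with a Python scalar: in-place ops keep self's dtype,
# and scalars promote with a lower category weight, so float32 survives
a.scatter_(0, torch.tensor([1]), 2.5)
print(a.dtype)  # torch.float32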
@albanD
albanD / opt_as_hook.py
Last active August 8, 2023 07:49
PyTorch optimizer as hook
import torch
from torch import nn
from torch.optim.sgd import sgd
import gc
import objgraph
import weakref

def all():
    # Only a subset of the args you could have
    def set_sgd_hook(mod, p, lr, weight_decay, momentum):
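The preview stops inside set_sgd_hook; per the gist title, the idea is to run the optimizer update from a hook so the step happens during backward and the gradient can be freed right away. A minimal sketch assuming plain SGD and PyTorch >= 2.1's register_post_accumulate_grad_hook (set_sgd_as_hook is a hypothetical helper, not the gist's exact mechanism):

def set_sgd_as_hook(mod, lr=0.01):
    def hook(p):
        # Apply the SGD step as soon as this parameter's grad is accumulated
        with torch.no_grad():
            p.add_(p.grad, alpha=-lr)
        p.grad = None  # step is done; free the gradient immediately
    for p in mod.parameters():
        p.register_post_accumulate_grad_hook(hook)

mod = nn.Linear(4, 2)
set_sgd_as_hook(mod, lr=0.1)
mod(torch.randn(8, 4)).sum().backward()  # params update inside backward()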