
Sergey Zagoruyko (szagoruyko)

szagoruyko / convertLinear2Conv1x1.lua
Created September 28, 2015 18:16 — forked from fmassa/convertLinear2Conv1x1.lua
Simple example of how to convert a Linear module to a 1x1 convolution
require 'nn'

-- you just need to provide the linear module you want to convert,
-- and the dimensions of the field of view of the linear layer
function convertLinear2Conv1x1(linmodule, in_size)
   local s_in = linmodule.weight:size(2) / (in_size[1] * in_size[2])
   local s_out = linmodule.weight:size(1)
   local convmodule = nn.SpatialConvolutionMM(s_in, s_out, in_size[1], in_size[2], 1, 1)
   convmodule.weight:copy(linmodule.weight)
   convmodule.bias:copy(linmodule.bias)
   return convmodule
end
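A minimal usage sketch (the layer sizes below are illustrative, not from the gist): a Linear classifier that previously saw a 6x6 field of view becomes a 6x6 convolution that can slide over larger feature maps.

-- hypothetical sizes: a classifier over a 512x6x6 feature map
local lin  = nn.Linear(512 * 6 * 6, 1000)
local conv = convertLinear2Conv1x1(lin, {6, 6})
-- the converted module now accepts larger inputs and produces spatial scores
print(conv:forward(torch.randn(512, 8, 8)):size())  -- 1000 x 3 x 3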
local cv = require 'cv'   -- base torch-opencv module, needed before its submodules
require 'cv.highgui'
require 'cv.videoio'
require 'cv.imgproc'
require 'nn'
--require 'clnn'

local cap = cv.VideoCapture{device=0}
if not cap:isOpened() then
   print("Failed to open the default camera")
   os.exit(-1)
end
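The preview stops after opening the camera. A minimal sketch of a typical capture-and-display loop with the same torch-opencv bindings (window name and key handling are illustrative, not taken from the gist):

-- grab frames and display them until a key is pressed
cv.namedWindow{winname='camera', flags=cv.WINDOW_AUTOSIZE}
local _, frame = cap:read{}
while true do
   cv.imshow{winname='camera', image=frame}
   if cv.waitKey{30} >= 0 then break end   -- any key stops the loop
   cap:read{image=frame}                   -- reuse the frame tensor
end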
-- Usage:
-- find . -name model.net -print0 | xargs -0 -n 1 th ~/clearState.lua
require 'cudnn'
local name = arg[1]
assert(paths.filep(name))
print'before'
-- apply `callback` to this module and, recursively, to every submodule;
-- each module is replaced in its container by whatever the callback returns
function nn.Module:replace(callback)
   local out = callback(self)
   if self.modules then
      for i, module in ipairs(self.modules) do
         self.modules[i] = module:replace(callback)
      end
   end
   return out
end
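-- Hypothetical use of the replace() helper above (not part of the gist):
-- swap every Dropout layer for an Identity before exporting a model.
net:replace(function(module)
   if torch.typename(module) == 'nn.Dropout' then
      return nn.Identity()
   else
      return module
   end
end)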
ts = {
   model = net,
   unpack = function(self)
      -- recreate the gradient buffers that were stripped before saving
      for k, v in ipairs(self.model:listModules()) do
         if v.weight and not v.gradWeight then
            v.gradWeight = v.weight:clone()
            v.gradBias = v.bias:clone()
         end
      end
      return self.model
   end,
}
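-- Hypothetical usage of the wrapper above (not part of the gist): after
-- loading a table saved this way, call unpack() to get the model back
-- with its gradient buffers restored.
local restored = ts:unpack()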
require 'loadcaffe'
require 'optim'
local matio = require 'matio'
local dataset = torch.load('/opt/datasets/tiny-datasets/cifar10_whitened.t7')
local net = loadcaffe.load('/tmp/e56253735ef32c3c296d/train_val.prototxt','./cifar10_nin.caffemodel')
net:evaluate()
print(net)
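-- Hedged sketch (not from the gist): run the imported Caffe net on one image.
-- The field names of cifar10_whitened.t7 below are an assumption about its layout.
local x = dataset.testData.data[1]   -- assumed to be a 3x32x32 whitened image
print(net:forward(x):size())         -- inspect the output shape of the imported net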
-- per-channel affine layer: one scale (weight) and one shift (bias) per feature map
local SpatialAffine, parent = torch.class('nn.SpatialAffine', 'nn.Module')

function SpatialAffine:__init(nOutput)
   parent.__init(self)
   self.weight = torch.Tensor(nOutput)
   self.bias = torch.Tensor(nOutput)
end

function SpatialAffine:updateOutput(input)
   local nFeature = self.weight:numel()