Install Atom from https://atom.io or with brew cask install atom
Then install packages with apm, Atom's package manager (for example: apm install hydrogen):
- hydrogen
- autocomplete-paths
- language-lua
#!/usr/bin/env python
# NOTE(review): fragment recovered from a garbled paste -- the trailing "| |"
# table-extraction residue made every line a syntax error and has been removed.
# The class appears truncated here (conventionally it also defines FAIL/ENDC/
# BOLD/UNDERLINE); only the constants visible in the fragment are kept.
import subprocess
import re


class bcolors:
    """ANSI SGR escape sequences for coloring terminal output."""
    HEADER = '\033[95m'   # bright magenta
    OKBLUE = '\033[94m'   # bright blue
    OKGREEN = '\033[92m'  # bright green
    WARNING = '\033[93m'  # bright yellow
-- NOTE(review): fragment truncated mid-function by a bad paste (closing
-- 'end's are missing); the trailing " | |" on each line is table-extraction
-- residue, left byte-identical here.
-- Strips cuDNN-specific cached state (all fields whose name contains 'Desc',
-- plus the autotuner algType) from every cudnn module in `net` -- presumably
-- so the network can be serialized or moved; confirm against the caller.
function removeDescriptors(net) | |
for i,val in pairs(net.modules) do | |
-- only touch cudnn-backed modules (matched by their tostring name)
if tostring(val):find('cudnn') then | |
for name,field in pairs(val) do | |
if name:find('Desc') then | |
val[name] = nil | |
end | |
end | |
-- algType doesn't match the 'Desc' pattern, so it is cleared explicitly;
-- iDesc is redundant with the loop above but cleared again anyway
val.algType = nil | |
val.iDesc = nil |
Using 1-th gpu | |
Loading ./data/ptb.train.txt, size of data = 929589 | |
Loading ./data/ptb.valid.txt, size of data = 73760 | |
Loading ./data/ptb.test.txt, size of data = 82430 | |
Network parameters: | |
{ | |
layers : 2 | |
lr : 1 | |
max_max_epoch : 13 | |
max_grad_norm : 5 |
Using 1-th gpu | |
Loading ./data/ptb.train.txt, size of data = 929589 | |
Loading ./data/ptb.valid.txt, size of data = 73760 | |
Loading ./data/ptb.test.txt, size of data = 82430 | |
Network parameters: | |
{ | |
layers : 2 | |
lr : 1 | |
max_max_epoch : 13 | |
max_grad_norm : 5 |
-- NOTE(review): fragment truncated mid-__init (no closing 'end' visible);
-- trailing " | |" residue from table extraction left byte-identical.
-- nn.BidirectionalSequencer: a Container that drives one module over the
-- sequence forward and another backward, each wrapped in an nn.Sequencer.
-- nOutputSize is accepted but not used in the visible lines -- TODO confirm
-- its role in the truncated remainder.
local BidirectionalSequencer, parent = torch.class('nn.BidirectionalSequencer', 'nn.Container') | |
function BidirectionalSequencer:__init(module_forward, module_backward, nOutputSize) | |
parent.__init(self) | |
-- keep direct references to the raw modules as well as the sequenced wrappers
self.module_forward = module_forward | |
self.module_backward = module_backward | |
self.modules[1] = nn.Sequencer(module_forward) | |
self.modules[2] = nn.Sequencer(module_backward) | |
self.output = {} |
-- NOTE(review): cleaned the " | |" table-extraction residue that made every
-- line invalid Lua; logic is otherwise unchanged from the fragment.
require 'nn'

local vgg = nn.Sequential()

--- Building block: append Conv(3x3, stride 1, pad 1) -> BatchNorm -> ReLU
-- to the shared `vgg` container.
-- @param nInputPlane  number of input feature maps
-- @param nOutputPlane number of output feature maps
-- @return the `vgg` container (returned for chaining)
local function ConvBNReLU(nInputPlane, nOutputPlane)
  vgg:add(nn.SpatialConvolution(nInputPlane, nOutputPlane, 3,3, 1,1, 1,1))
  vgg:add(nn.SpatialBatchNormalization(nOutputPlane, 1e-3))
  -- in-place ReLU to save memory
  vgg:add(nn.ReLU(true))
  return vgg
end
-- NOTE(review): fragment truncated mid-condition at the last visible line;
-- trailing " | |" extraction residue left byte-identical.
-- a script to simplify trained net by incorporating every SpatialBatchNormalization to SpatialConvolution | |
-- and BatchNormalization to Linear | |
-- Recursively walks `net`; for each leaf it checks whether a
-- SpatialBatchNormalization directly follows a SpatialConvolution (so the BN
-- can be folded into the conv's weights -- the folding itself is in the
-- truncated remainder).
local function BNtoConv(net) | |
for i,v in ipairs(net.modules) do | |
-- containers recurse; only leaves are inspected
if v.modules then | |
BNtoConv(v) | |
else | |
if torch.typename(v) == 'nn.SpatialBatchNormalization' and | |
(torch.typename(net:get(i-1)):find'SpatialConvolution') then |
-- NOTE(review): cleaned the " | |" table-extraction residue that made every
-- line invalid Lua; logic is otherwise unchanged from the fragment.
--- nn.NoBiasLinear: an nn.Linear whose bias starts at (and is meant to stay)
-- zero. Note the accGradParameters override below must also skip the bias
-- gradient for the bias to remain zero during training.
local Linear, parent = torch.class('nn.NoBiasLinear', 'nn.Linear')

--- Construct exactly like nn.Linear, then zero-fill the bias vector.
-- @param inputSize  number of input features
-- @param outputSize number of output features
function Linear:__init(inputSize, outputSize)
  parent.__init(self, inputSize, outputSize)
  self.bias:fill(0)
end
-- NOTE(review): fragment truncated after the dim-1 branch opens; trailing
-- " | |" extraction residue left byte-identical.
-- Gradient accumulation for NoBiasLinear -- presumably accumulates only the
-- weight gradient (skipping the bias) in the truncated body; confirm.
function Linear:accGradParameters(input, gradOutput, scale) | |
-- default gradient scaling factor
scale = scale or 1 | |
-- dim() == 1 means a single (non-batched) input vector
if input:dim() == 1 then |
Install Atom from https://atom.io or with brew cask install atom
Then install packages with apm, Atom's package manager (for example: apm install hydrogen):
All tensor types (torch.DoubleTensor, torch.FloatTensor, etc.) should have sparse variants: torch.SparseDoubleTensor, torch.SparseFloatTensor, etc.
Copying between dense and sparse matrices should be done with the :copy() function.
The underlying BLAS has to be swappable with MKL/OpenBLAS/Atlas, etc. Other math operations have to be implemented with CSPARSE.