- Use SSHFS (installed with Homebrew) to browse remote files
- atom-beautify
  - autopep8 (through Anaconda)
  - uncrustify (through Homebrew)
- Linter
  - linter-clang
  - linter-cppcheck
-- When running with more than one hogwild worker, create shared work queues
-- ('examples', 'done', 'ids') and pre-fill 'ids' with one id per worker.
if opt.hogwild > 1 then
  local ipc = require 'libipc'
  local q = ipc.workqueue('examples')
  local q2 = ipc.workqueue('done')
  local ids = ipc.workqueue('ids')
  for i = 1, opt.hogwild do
    ids:write(i)
  end
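The worker side of these queues is not shown here; a minimal sketch of what it might look like with torch-ipc's ipc.map, where each thread picks up an id from 'ids', reads work from 'examples', and reports back on 'done' (the worker body and the result table are illustrative assumptions, not the original code):

local workers = ipc.map(opt.hogwild, function()
  -- Each mapped thread runs in a fresh Lua state, so it re-requires libipc
  -- and re-opens the named queues created by the main thread.
  local ipc = require 'libipc'
  local id = ipc.workqueue('ids'):read()
  local examples = ipc.workqueue('examples')
  local done = ipc.workqueue('done')
  -- Placeholder work: read one item handed out by the main thread and echo it back.
  local item = examples:read()
  done:write({worker = id, result = item})
end)
-- The main thread hands out work with q:write(...) and collects results with q2:read().
workers:join()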
-- Initialize the states
-- Vectorized approach
local dict_size = probs:size(2)
local max_out_arcs = out_arcs:size(2)
local max_in_arcs = in_arcs:size(2)
local state_probs = torch.narrow(params.state_probs, 1, 1, seq_len + 1)
state_probs.value:fill(0)
for i = 2, seq_len + 1 do
  -- Origin of each incoming arc: the current state minus the arc's length.
  local starts = torch.ones(max_in_arcs):long() * i
  local origins = starts:add(-lengths:index(1, in_arcs[i]))
  -- Linear indices of probs[origin][arc] in the flattened probability matrix.
  local indices = ((origins - 1) * dict_size + 1):add(in_arcs[i] - 1)
local dict_size = probs:size(2)
local max_out_arcs = out_arcs:size(2)
local max_in_arcs = in_arcs:size(2)
local state_probs = torch.narrow(params.state_probs, 1, 1, seq_len)
state_probs.value:fill(0)
for i = 2, seq_len do
  local starts = torch.ones(max_in_arcs):long() * i
  local origins = starts:add(-lengths:index(1, in_arcs[i]))
  local indices = ((origins - 1) * dict_size + 1):add(in_arcs[i])
  local in_arc_probs = torch.index(probs:view(seq_len * dict_size), 1, indices)
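As a note on the index arithmetic above: viewing probs as a flat vector and addressing entry (row, col) at linear index (row - 1) * dict_size + col is standard row-major indexing; a tiny standalone check of that formula (the toy tensor and indices below are made up for illustration):

local torch = require 'torch'
local toy = torch.range(1, 12):view(4, 3)      -- stand-in for probs: 4 rows x 3 columns
local rows = torch.LongTensor({2, 4})
local cols = torch.LongTensor({3, 1})
-- Row-major linear index of entry (row, col) in a matrix with 3 columns.
local indices = (rows - 1) * toy:size(2) + cols
print(toy:view(-1):index(1, indices))          -- 6 and 10, i.e. toy[2][3] and toy[4][1]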
1 1 2.2973299250376 2.2973299250376
1 2 2.0864643271487 2.2762433652487
1 3 2.1500752827293 2.2636265569968
1 4 1.9636232815994 2.2336262294571
1 5 2.0671982139671 2.2169834279081
1 6 2.1094124953551 2.2062263346528
1 7 1.9331442971793 2.1789181309054
1 8 4.0584201436111 2.366868332176
1 9 1.8639954152644 2.3165810404848
1 10 1.6428338575222 2.2492063221886
local function nll(params, probs, out_arcs, out_mask, lengths)
  local seq_len = probs:size(1)
  local max_out_arcs = out_arcs:size(2)
  local state_nll = {0}
  for i = 1, seq_len do
    for j = 1, max_out_arcs do
      -- out_mask marks which of the padded outgoing arc slots are real arcs.
      if out_mask[{i, j}] ~= 1 then
        break
      end
      -- Following arc out_arcs[{i, j}] from state i lands in state i + its length.
      local target = i + lengths[out_arcs[{i, j}]]
local ffi = require 'ffi'
ffi.cdef([[
typedef long time_t;
typedef struct timeval {
  time_t tv_sec;
  time_t tv_usec;
};
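The cdef presumably continues with a gettimeofday prototype before closing with ]]); a minimal sketch of how such a timer is typically wrapped in LuaJIT, assuming that declaration (the wallclock helper below is illustrative, not the original code):

-- Assumption: the gettimeofday prototype is not part of the original snippet.
ffi.cdef([[
int gettimeofday(struct timeval* t, void* tz);
]])

-- Wall-clock time in seconds with microsecond resolution.
local function wallclock()
  local t = ffi.new('struct timeval')
  ffi.C.gettimeofday(t, nil)
  return tonumber(t.tv_sec) + tonumber(t.tv_usec) * 1e-6
end

local start = wallclock()
-- ... timed work goes here ...
print(string.format('elapsed: %.6f s', wallclock() - start))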
import numpy
import theano
from theano import tensor, config

# The parameters
W = theano.shared(numpy.arange(9, dtype=config.floatX).reshape(3, 3))
storage = theano.shared(numpy.zeros((3, 3), dtype=config.floatX))

# The input
x = tensor.vector('x')
-- The Torch/autograd counterpart of the Theano setup above.
grad = require 'autograd'
torch = require 'torch'
params = {
  W = torch.range(0, 8):view(3, 3),
  storage = torch.zeros(3, 3)
}

function f(params, x)
  params.storage[2] = params.W * x
t = require 'torch'
grad = require 'autograd'

function loop(x, h, y, idxs)
  local cost
  -- Scatter x into h at the given indices, accumulating the squared error
  -- against the corresponding entries of y.
  for i = 1, x:size(1) do
    h[idxs[i]] = x[i]
    if i == 1 then
      cost = t.pow(y[idxs[i]] - h[idxs[i]], 2)
    else
      cost = cost + t.pow(y[idxs[i]] - h[idxs[i]], 2)