Bart van Merriënboer (bartvm) - GitHub Gists

import sys
import theano
from theano import tensor
import numpy as np

def main(length):
    # Sequential input
    x = tensor.vector('x')
t = require 'torch'
grad = require 'autograd'

function loop(p, y, idxs)
  -- Only works if h is a differentiable value as well
  local x = p.x
  local h = p.h
  for i = 1, x:size(1) do
    h[idxs[i]] = x[i]
  end
import theano
from theano import tensor
import numpy as np

def main():
    x = tensor.vector('x')
    y = tensor.alloc(np.float32(0), *x.shape)

    def step(x_elem, t, prev_y):
        new_y = tensor.set_subtensor(prev_y[t], x_elem)
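        # (Assumed completion: the gist preview cuts off here.) Returning the
        # updated vector lets theano.scan thread it through as recurrent state.
        return new_y

    # Illustrative wiring, not necessarily the gist's: iterate over the
    # elements of x together with their indices and keep the last output.
    outputs, updates = theano.scan(step,
                                   sequences=[x, tensor.arange(x.shape[0])],
                                   outputs_info=y)
    f = theano.function([x], outputs[-1], updates=updates)
    print(f(np.arange(5).astype(theano.config.floatX)))  # [0. 1. 2. 3. 4.]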
from theano import tensor
from blocks.bricks import Linear, Rectifier, Softmax
from blocks.bricks.cost import CategoricalCrossEntropy
from blocks.roles import WEIGHT
from blocks.graph import ComputationGraph
from blocks.filter import VariableFilter
from blocks.initialization import IsotropicGaussian, Constant
from blocks.algorithms import GradientDescent, Scale
from blocks.log.log import TrainingLog
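The preview above stops at the imports. As a rough, hedged sketch of how these bricks usually fit together (the dimensions, brick names, and step rule below are illustrative choices of mine, not recovered from the gist), a small feed-forward classifier built on those imports might look like this:

x = tensor.matrix('features')
y = tensor.lmatrix('targets')

# Two linear layers with the imported initialization schemes (sizes are made up)
hidden = Linear(name='hidden', input_dim=784, output_dim=256,
                weights_init=IsotropicGaussian(0.01), biases_init=Constant(0))
output = Linear(name='output', input_dim=256, output_dim=10,
                weights_init=IsotropicGaussian(0.01), biases_init=Constant(0))
probs = Softmax().apply(output.apply(Rectifier().apply(hidden.apply(x))))
cost = CategoricalCrossEntropy().apply(y.flatten(), probs)
hidden.initialize()
output.initialize()

# Pull the weight variables out of the graph, e.g. for a penalty term
cg = ComputationGraph(cost)
weights = VariableFilter(roles=[WEIGHT])(cg.variables)

# Plain SGD with a fixed gradient scaling, plus a log to record progress
algorithm = GradientDescent(cost=cost, parameters=cg.parameters,
                            step_rule=Scale(learning_rate=0.1))
log = TrainingLog()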
bartvm / PIA.sh (last active September 8, 2017 03:42)
cd openvpn
connections=(*.ovpn)
i=1
for filename in "${connections[@]}"; do
  extension="${filename##*.}"
  base="${filename%.*}"
  # Number the connection directories 11, 12, ... with zero padding
  printf -v j "%02d" $((i+10))
  mkdir "$j"
  cp crl.rsa.2048.pem "$j/crl.rsa.2048.pem"
  cp ca.rsa.2048.crt "$j/ca.rsa.2048.crt"
SOURCE=en
TARGET=fr
SCRIPTS="$(pwd)/mosesdecoder/scripts"
# Create the data directory
mkdir -p IWSLT15 && cd IWSLT15
# Download and unpack the data
# wget "https://wit3.fbk.eu/archive/2015-01/texts/$SOURCE/$TARGET/$SOURCE-$TARGET.tgz"
tar xvfz "$SOURCE-$TARGET.tgz"
cd "$SOURCE-$TARGET"
for lang in $SOURCE $TARGET; do
bartvm / gist:3e3d169a7d4154353981 (last active December 3, 2015 20:05)
Pseudo-code multi-GPU

Each worker is roughly of the following form:

# socket is some form of communication
def train(id, device, batch_queue, socket):
    construct_graph_and_compile()
    if id == 1:
        # This is the first process, so memory map the parameters
        m_params = memory_map(params)
        # Send them to the main thread
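The pseudo-code is cut off above. A minimal, runnable sketch of the idea it describes, several worker processes pulling batches from a queue and applying asynchronous updates to a shared parameter buffer, follows; it uses multiprocessing shared memory as a stand-in for the memory-mapped parameters and is an illustration, not the author's implementation:

import multiprocessing as mp
import numpy as np

def worker(worker_id, shared_params, batch_queue):
    params = np.frombuffer(shared_params.get_obj())  # view onto the shared memory
    while True:
        batch = batch_queue.get()
        if batch is None:                            # sentinel: no more work
            break
        grads = batch.mean(axis=0) - params          # stand-in "gradient"
        params += 0.1 * grads                        # asynchronous (Hogwild-style) update

if __name__ == '__main__':
    dim = 4
    shared_params = mp.Array('d', dim)               # zero-initialised doubles
    batch_queue = mp.Queue()
    workers = [mp.Process(target=worker, args=(i, shared_params, batch_queue))
               for i in range(2)]
    for w in workers:
        w.start()
    for _ in range(100):
        batch_queue.put(np.random.rand(16, dim))
    for _ in workers:
        batch_queue.put(None)
    for w in workers:
        w.join()
    print(np.frombuffer(shared_params.get_obj()))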
DROP TABLE IF EXISTS data;
CREATE TABLE data (
    hostname TEXT,
    gpu_id TEXT,
    gpu_name TEXT,
    timestamp INTEGER,
    utilization REAL
);
import sqlite3
import xmltodict
from datetime import datetime
from flask import g, Flask, request
app = Flask(__name__)
DATABASE = 'data.db'
# Run the following command on the hosts:
# nvidia-smi -q -x -l 60 | curl -X POST -d @- http://eos12:5000/$HOSTNAME
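# (Sketch, not the rest of the gist.) A handler of roughly this shape would
# accept the posted XML and fill the `data` table defined above. The route
# and the exact nvidia_smi_log fields used here are assumptions about a
# typical `nvidia-smi -q -x` report, not recovered from the truncated gist.
def get_db():
    db = getattr(g, '_database', None)
    if db is None:
        db = g._database = sqlite3.connect(DATABASE)
    return db

@app.route('/<hostname>', methods=['POST'])
def report(hostname):
    log = xmltodict.parse(request.get_data())['nvidia_smi_log']
    gpus = log['gpu']
    if not isinstance(gpus, list):       # a single GPU is not wrapped in a list
        gpus = [gpus]
    now = int(datetime.now().timestamp())
    db = get_db()
    for gpu in gpus:
        utilization = float(gpu['utilization']['gpu_util'].rstrip(' %'))
        db.execute('INSERT INTO data VALUES (?, ?, ?, ?, ?)',
                   (hostname, gpu['@id'], gpu['product_name'], now, utilization))
    db.commit()
    return 'ok'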
import numpy as np
import timeit
from matplotlib import pyplot as plt

n = 1000
t1s, t2s = [], []
for p in np.linspace(0, 1, num=20):
    y = np.random.binomial(1, p, n)
    x = np.random.rand(n)
    t1 = timeit.timeit('np.sum(x * y)', 'import numpy as np; from __main__ import x, y', number=100000)
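    # Hypothetical completion: the preview ends at t1, so the second timed
    # expression (boolean indexing) and the plotting below are assumptions.
    t2 = timeit.timeit('np.sum(x[y == 1])',
                       'import numpy as np; from __main__ import x, y',
                       number=100000)
    t1s.append(t1)
    t2s.append(t2)

plt.plot(np.linspace(0, 1, num=20), t1s, label='np.sum(x * y)')
plt.plot(np.linspace(0, 1, num=20), t2s, label='np.sum(x[y == 1])')
plt.xlabel('p')
plt.ylabel('time for 100000 runs (s)')
plt.legend()
plt.show()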