
@hiropppe
hiropppe / DBPediaDockerfile
Last active January 19, 2018 08:29
DBPediaDockerfile
FROM ubuntu:16.04
RUN mkdir /root/_INSTALL
WORKDIR /root/_INSTALL
RUN sed -i.bak -e "s%http://archive.ubuntu.com/ubuntu/%http://ftp.jaist.ac.jp/pub/Linux/ubuntu/%g" /etc/apt/sources.list
ENV TZ Asia/Tokyo
RUN apt-get update
@hiropppe
hiropppe / nvidia-docker2_g3.4xlarge_setup.txt
Last active June 11, 2018 04:58
nvidia-docker2 setup instructions for Ubuntu 16.04 LTS (Xenial Xerus) on an EC2 g3.4xlarge instance.
[2018.6.11]
sudo apt update
sudo apt upgrade
sudo apt install build-essential
wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1604/x86_64/cuda-repo-ubuntu1604_9.2.88-1_amd64.deb
sudo dpkg -i cuda-repo-ubuntu1604_9.2.88-1_amd64.deb
sudo apt-key adv --fetch-keys http://developer.download.nvidia.com/compute/cuda/repos/ubuntu1604/x86_64/7fa2af80.pub
sudo apt-get update
sudo apt-get install cuda
nvidia-smi
# -*- coding:utf-8 -*-
import codecs
import json
import os
import sys
if __name__ == '__main__':
    def walk_json(path):
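The fragment above is cut off right after the nested def. Purely as an illustration, here is a hedged sketch of what a JSON-walking helper of that shape might look like; the body below is an assumption, not the gist's actual code.

# Hypothetical sketch only -- the real walk_json body is truncated above.
import codecs
import json
import os


def walk_json(path):
    # Recursively visit a directory tree and yield parsed JSON documents.
    for root, _, files in os.walk(path):
        for name in files:
            if name.endswith('.json'):
                with codecs.open(os.path.join(root, name), 'r', 'utf-8') as f:
                    yield json.load(f)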
@hiropppe
hiropppe / cython_dot_sample_for_nogil.pyx
Last active June 5, 2017 07:31
Simple Cython dot product sample for nogil
# cython: boundscheck = False
# cython: wraparound = False
# cython: cdivision = True
import numpy as np
cimport numpy as np
from scipy.linalg cimport cython_blas
from cython.parallel import prange
from libc.stdio cimport printf
from libc.stdlib cimport abort, malloc, free
cimport openmp
def get_int_flag():
    cdef int flag = 0
@hiropppe
hiropppe / synchronous_distributed_mnist_training.py
Created February 16, 2017 04:31
Distributed TensorFlow 0.12.0 example of using data parallelism and shared model parameters. This is roughly a copy of ischlag (https://github.com/ischlag/distributed-tensorflow-example).
'''
Synchronous distributed TensorFlow 0.12.0 example of using data parallelism and shared model parameters.
Trains a simple sigmoid neural network on MNIST for 20 epochs on three machines using one parameter server.
Replace the hardcoded host URLs below with your own hosts.
Run like this:
pc-01$ python synchronous_distributed_mnist_training.py --job_name="ps" --task_index=0
pc-02$ python synchronous_distributed_mnist_training.py --job_name="worker" --task_index=0
pc-03$ python synchronous_distributed_mnist_training.py --job_name="worker" --task_index=1
'''
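For reference, a minimal sketch of how the cluster behind those three run commands is typically declared in TensorFlow 0.12; the host names follow the pc-01/pc-02/pc-03 placeholders above, and this is an illustrative fragment, not the gist's full script.

import tensorflow as tf

# One parameter server and two workers, mirroring the three run commands.
cluster = tf.train.ClusterSpec({
    "ps": ["pc-01:2222"],
    "worker": ["pc-02:2222", "pc-03:2222"],
})
server = tf.train.Server(cluster, job_name="worker", task_index=0)

# Pin variables to the parameter server and ops to this worker.
with tf.device(tf.train.replica_device_setter(
        worker_device="/job:worker/task:0", cluster=cluster)):
    W = tf.Variable(tf.zeros([784, 10]), name="W")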
@hiropppe
hiropppe / async_distributed_mnist_training.py
Last active February 16, 2017 04:30
Distributed TensorFlow 0.12.0 example of using data parallelism and shared model parameters. This is roughly a copy of ischlag (https://github.com/ischlag/distributed-tensorflow-example).
'''
Asynchronous distributed TensorFlow 0.12.0 example of using data parallelism and shared model parameters.
Trains a simple sigmoid neural network on MNIST for 20 epochs on three machines using one parameter server.
Replace the hardcoded host URLs below with your own hosts.
Run like this:
pc-01$ python asynchronous_distributed_mnist_training.py --job_name="ps" --task_index=0
pc-02$ python asynchronous_distributed_mnist_training.py --job_name="worker" --task_index=0
pc-03$ python asynchronous_distributed_mnist_training.py --job_name="worker" --task_index=1
'''
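The asynchronous variant differs mainly in that each worker applies its own gradient updates without waiting for the others. A hedged sketch of the per-worker model and optimizer follows; it is illustrative only, and the variable shapes and learning rate are assumptions, not taken from the gist.

import tensorflow as tf

# Each worker builds the same sigmoid model and minimizes the loss
# independently; there is no aggregation barrier as in the synchronous case.
x = tf.placeholder(tf.float32, [None, 784])
y_ = tf.placeholder(tf.float32, [None, 10])
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
y = tf.nn.sigmoid(tf.matmul(x, W) + b)
loss = tf.reduce_mean(tf.square(y - y_))
train_op = tf.train.GradientDescentOptimizer(0.5).minimize(loss)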
import tables as tb
from scipy.sparse import lil_matrix
h5 = tb.open_file('sparse.h5', 'a')
data_group = h5.create_group(h5.root, 'data_group')
indices_group = h5.create_group(h5.root, 'indices_group')
indptr_group = h5.create_group(h5.root, 'indptr_group')
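A hedged sketch of how one sparse matrix could then be written into those groups; the conversion to CSR, the array name 'm0', and the create_array calls are assumptions for illustration, not part of the original snippet.

m = lil_matrix((3, 3))
m[0, 0] = 1.0
m[2, 1] = 2.0
csr = m.tocsr()  # CSR exposes the data / indices / indptr component arrays

# Store each CSR component as a separate array in its group.
h5.create_array(data_group, 'm0', csr.data)
h5.create_array(indices_group, 'm0', csr.indices)
h5.create_array(indptr_group, 'm0', csr.indptr)
h5.close()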
$ mkdir -p ~/.vim/bundle
$ git clone https://github.com/Shougo/neobundle.vim ~/.vim/bundle/neobundle.vim
"------------------------------------
" neocomplete.vim
"------------------------------------
"Note: This option must set it in .vimrc(_vimrc). NOT IN .gvimrc(_gvimrc)!
" Disable AutoComplPop.
let g:acp_enableAtStartup = 0