Viswanath (Vish) Sivakumar (viswanathgs), San Francisco, CA
@viswanathgs
viswanathgs / segment-tree.rkt
Created July 3, 2012 21:52
Segment Trees - Range Maximum Query (RMQ) - Scheme Implementation
;; Package for Range Maximum Query (RMQ) operations using Segment Trees.
;; Implemented in Racket v5.2.1.
;; Author: Viswanath Sivakumar <[email protected]>

;; Abstraction for integer ranges.
(define make-range
  (lambda (low high) (cons low high)))
(define low
  (lambda (range) (car range)))
(define high
  (lambda (range) (cdr range)))
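The gist is cut off before the tree build and query logic. As a rough sketch of the technique the description names, here is a minimal iterative segment-tree RMQ in Python; the class and method names are mine, not the gist's:

class SegmentTree:
    """Segment tree supporting range-maximum queries over a fixed array."""

    def __init__(self, values):
        self.n = len(values)
        self.tree = [float('-inf')] * (2 * self.n)
        # Leaves hold the input; internal nodes hold the max of their children.
        for i, v in enumerate(values):
            self.tree[self.n + i] = v
        for i in range(self.n - 1, 0, -1):
            self.tree[i] = max(self.tree[2 * i], self.tree[2 * i + 1])

    def query(self, low, high):
        """Maximum of values[low..high], inclusive."""
        lo, hi = low + self.n, high + self.n + 1
        best = float('-inf')
        while lo < hi:
            if lo & 1:
                best = max(best, self.tree[lo])
                lo += 1
            if hi & 1:
                hi -= 1
                best = max(best, self.tree[hi])
            lo //= 2
            hi //= 2
        return best

# SegmentTree([3, 1, 4, 1, 5]).query(1, 3)  -> 4, the max of values[1..3]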
// evhttp (libevent's embedded HTTP API) with pthreads and BSD sockets.
#include <event.h>
#include <evhttp.h>
#include <pthread.h>
#include <errno.h>
#include <string.h>
#include <fcntl.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <netinet/in.h>
#include <iostream>
#!/usr/bin/env python
import numpy
import theano
import time

def timer(func):
    def wrapper(*args, **kwargs):
        print("Executing func %s" % func.__name__)
        start = time.time()
        # Run the wrapped function and report elapsed wall-clock time.
        result = func(*args, **kwargs)
        print("%s took %f seconds" % (func.__name__, time.time() - start))
        return result
    return wrapper
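For illustration only (matmul is a made-up example, not from the gist), the decorator would be applied like this:

@timer
def matmul(a, b):
    return numpy.dot(a, b)

matmul(numpy.ones((500, 500)), numpy.ones((500, 500)))
# Executing func matmul
# matmul took ... seconds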
@viswanathgs
viswanathgs / simple_dqn.py
Created June 27, 2016 21:27
DQN CartPole
import gym
import random
import numpy as np
import tensorflow as tf

class DQN:
    REPLAY_MEMORY_SIZE = 10000
    RANDOM_ACTION_PROB = 0.5
    RANDOM_ACTION_DECAY = 0.99
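The snippet stops at the hyperparameters. RANDOM_ACTION_PROB and RANDOM_ACTION_DECAY suggest the usual epsilon-greedy exploration schedule; the following is my own sketch of how such constants are typically wired up, not code from the gist:

def select_action(q_values, random_action_prob):
    # With probability epsilon, explore with a random action;
    # otherwise exploit the current Q-value estimates.
    if random.random() < random_action_prob:
        return random.randrange(len(q_values))
    return int(np.argmax(q_values))

# Decayed once per training step, driving exploration toward greedy play.
random_action_prob = DQN.RANDOM_ACTION_PROB
random_action_prob *= DQN.RANDOM_ACTION_DECAY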
@viswanathgs
viswanathgs / simple_dqn.py
Created June 27, 2016 23:37
DQN CartPole
import gym
import random
import numpy as np
import tensorflow as tf

class DQN:
    REPLAY_MEMORY_SIZE = 10000
    RANDOM_ACTION_DECAY = 0.99
    MIN_RANDOM_ACTION_PROB = 0.1
@viswanathgs
viswanathgs / cartpole_dqn.py
Created June 28, 2016 05:18
DQN for OpenAI Gym CartPole v0
import gym
import random
import numpy as np
import tensorflow as tf

class DQN:
    REPLAY_MEMORY_SIZE = 10000
    RANDOM_ACTION_DECAY = 0.99
    MIN_RANDOM_ACTION_PROB = 0.1
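All three revisions share REPLAY_MEMORY_SIZE, the capacity of DQN's experience-replay buffer. A hedged sketch of the standard replay pattern (the helper names and the batch size of 32 are assumptions, not from the gist):

replay_memory = []

def remember(state, action, reward, next_state, done):
    # Evict the oldest transition once the buffer reaches capacity.
    if len(replay_memory) >= DQN.REPLAY_MEMORY_SIZE:
        replay_memory.pop(0)
    replay_memory.append((state, action, reward, next_state, done))

def sample_minibatch(batch_size=32):
    # Uniform sampling over past transitions decorrelates training data.
    return random.sample(replay_memory, batch_size)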
@viswanathgs
viswanathgs / gpu_io_benchmark.py
Created July 7, 2016 19:26
Compare GPU to GPU broadcast with CPU to GPU broadcast
import argparse
import tensorflow as tf
import time

def run(source_device, num_gpus=4):
    shape = [100000000]
    source_device = '/%s:0' % source_device
    with tf.device(source_device):
        weight_init = tf.truncated_normal_initializer()
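        # The gist is truncated here. A guess at how the benchmark
        # continues (TF1-style API; the names below are mine): allocate
        # the tensor on the source device, copy it to every GPU, time it.
        source = tf.get_variable('source', shape, initializer=weight_init)
    copies = []
    for i in range(num_gpus):
        with tf.device('/gpu:%d' % i):
            # tf.identity pinned to another device forces a cross-device copy.
            copies.append(tf.identity(source))
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        start = time.time()
        sess.run(copies)
        print('%s -> %d GPUs: %f seconds' %
              (source_device, num_gpus, time.time() - start))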
import numpy as np
import gym
from gym.spaces import Discrete, Box

# ================================================================
# Policies
# ================================================================

class DeterministicDiscreteActionLinearPolicy(object):
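    # The capture ends at the class line. A plausible body for a
    # deterministic linear policy over a discrete action space
    # (my sketch, not necessarily the original):
    def __init__(self, theta, ob_space, ac_space):
        # theta flattens a weight matrix W and bias b for linear scores.
        n_in = ob_space.shape[0]
        n_out = ac_space.n
        assert len(theta) == (n_in + 1) * n_out
        self.W = theta[: n_in * n_out].reshape(n_in, n_out)
        self.b = theta[n_in * n_out:].reshape(1, n_out)

    def act(self, ob):
        # Deterministic: always pick the highest-scoring action.
        scores = ob.dot(self.W) + self.b
        return int(np.argmax(scores))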
import numpy as np, os
os.environ["THEANO_FLAGS"] = "device=cpu,floatX=float64"
import theano, theano.tensor as T
import gym

def discount(x, gamma):
    """
    Given vector x, computes a vector y such that
    y[i] = x[i] + gamma * x[i+1] + gamma^2 * x[i+2] + ...
    """
class Trie:
    def __init__(self, character, prob):
        self.character = character
        self.probability = prob
        self.children = {}

    def add_child(self, child):
        assert self.character != '$', "Cannot add child to end token"
        if child.character not in self.children:
            self.children[child.character] = child
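For illustration, a word like "ab" terminated by the '$' end token would be inserted node by node (the probabilities below are invented):

root = Trie('', 1.0)
a, b, end = Trie('a', 0.6), Trie('b', 0.3), Trie('$', 0.1)
root.add_child(a)
a.add_child(b)
b.add_child(end)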