Code for the "Keras plays catch" blog post.
Train the agent:
python qlearn.py
Generate figures:
""" Trains an agent with (stochastic) Policy Gradients on Pong. Uses OpenAI Gym. """ | |
import numpy as np | |
import cPickle as pickle | |
import gym | |
# hyperparameters | |
H = 200 # number of hidden layer neurons | |
batch_size = 10 # every how many episodes to do a param update? | |
learning_rate = 1e-4 | |
gamma = 0.99 # discount factor for reward |
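For reference, here is a minimal sketch of how gamma is typically used to turn per-step rewards into discounted returns in this policy-gradient setup; the function name and the episode-boundary reset are assumptions modeled on the usual Pong arrangement, not necessarily the exact code from the post.

def discount_rewards(r):
    """Compute discounted returns for a 1D float array of per-step rewards."""
    discounted_r = np.zeros_like(r, dtype=np.float64)
    running_add = 0.0
    for t in reversed(range(r.size)):
        if r[t] != 0:
            running_add = 0.0  # reset at game boundaries (Pong only rewards when a point ends)
        running_add = running_add * gamma + r[t]
        discounted_r[t] = running_add
    return discounted_r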
# Install build tools
sudo apt-get update
sudo apt-get install -y build-essential git python-pip libfreetype6-dev libxft-dev libncurses-dev libopenblas-dev gfortran python3-matplotlib libblas-dev liblapack-dev libatlas-base-dev python3-dev python3-pydot linux-headers-generic linux-image-extra-virtual unzip python3-numpy swig python3-pandas python-sklearn python3-pip python3-venv

# Install CUDA 7.5
# wget http://developer.download.nvidia.com/compute/cuda/repos/ubuntu1410/x86_64/cuda-repo-ubuntu1410_7.0-28_amd64.deb
wget http://developer.download.nvidia.com/compute/cuda/repos/ubuntu1504/x86_64/cuda-repo-ubuntu1504_7.5-18_amd64.deb
sudo dpkg -i cuda-repo-ubuntu1504_7.5-18_amd64.deb && rm cuda-repo-ubuntu1504_7.5-18_amd64.deb
sudo apt-get update
sudo apt-get install -y cuda
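After a reboot, a quick sanity check (assuming the toolkit ended up under the default /usr/local/cuda symlink) is to confirm that the driver and compiler are visible:

# Verify the installation
nvidia-smi
/usr/local/cuda/bin/nvcc --version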
# Time series forecasting based on multiple time series, including the original one
# This script is based on the following examples and discussions:
# https://gist.github.com/lukovkin/1aefa4509e066690b892
# https://groups.google.com/forum/#!topic/keras-users/9GsDwkSdqBg
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import random
import theano
from ib.opt import Connection, message
from ib.ext.Contract import Contract
from ib.ext.Order import Order
from random import randint
import time

def error_handler(msg):
    print("Server Error: %s" % msg)
import pandas as pd
from random import random

# Toy data: two identical sawtooth series, with column "b" lagging "a" by 9 steps
flow = (list(range(1, 10, 1)) + list(range(10, 1, -1))) * 100
pdata = pd.DataFrame({"a": flow, "b": flow})
pdata.b = pdata.b.shift(9)
data = pdata.iloc[10:] * random()  # scale everything by one random factor
import numpy as np
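A small sketch of how this toy frame is usually split for forecasting: a plain chronological split, since shuffling would leak future values into the training set. The function name and the 10% hold-out are assumptions, not part of the original snippet.

def train_test_split(df, test_size=0.1):
    # chronological split: the last `test_size` fraction of rows is held out
    ntrn = int(len(df) * (1 - test_size))
    return df.iloc[:ntrn], df.iloc[ntrn:]

train_df, test_df = train_test_split(data)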
""" From: http://danielhnyk.cz/predicting-sequences-vectors-keras-using-rnn-lstm/ """ | |
from keras.models import Sequential | |
from keras.layers.core import TimeDistributedDense, Activation, Dropout | |
from keras.layers.recurrent import GRU | |
import numpy as np | |
def _load_data(data, steps = 40): | |
docX, docY = [], [] | |
for i in range(0, data.shape[0]/steps-1): | |
docX.append(data[i*steps:(i+1)*steps,:]) |
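To connect the imports above to a working model, here is a rough sketch in the old Keras 0.x/1.x style they come from; the layer sizes, argument names, and the commented-out training call are assumptions, and newer Keras versions spell this API differently.

# Sketch (old Keras API): a GRU that returns a sequence, mapped to one output per timestep
in_dim, out_dim, hidden = 2, 2, 50          # assumed sizes for the two-column toy data
model = Sequential()
model.add(GRU(hidden, input_shape=(40, in_dim), return_sequences=True))
model.add(Dropout(0.2))
model.add(TimeDistributedDense(out_dim))
model.add(Activation("linear"))
model.compile(loss="mean_squared_error", optimizer="rmsprop")
# X, y = _load_data(data.values, steps=40)
# model.fit(X, y, batch_size=32, nb_epoch=10, validation_split=0.1)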
## VGG16 model for Keras

This is the Keras model of the 16-layer network used by the VGG team in the ILSVRC-2014 competition.
It has been obtained by directly converting the Caffe model provided by the authors.
Details about the network architecture can be found in the following arXiv paper:
Very Deep Convolutional Networks for Large-Scale Image Recognition
K. Simonyan, A. Zisserman
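A minimal usage sketch, assuming the VGG_16(weights_path) builder function and the converted vgg16_weights.h5 file that accompany this model; the input here is a random placeholder standing in for a 224x224 BGR image preprocessed with mean subtraction.

import numpy as np
from keras.optimizers import SGD

model = VGG_16('vgg16_weights.h5')                       # assumed builder + weight file from the gist
sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd, loss='categorical_crossentropy')

im = np.random.rand(1, 3, 224, 224).astype(np.float32)   # placeholder for a preprocessed image batch
out = model.predict(im)
print(np.argmax(out))                                    # index of the highest-scoring ILSVRC class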
import seaborn as sns
from scipy.optimize import curve_fit

# Function for linear fit
def func(x, a, b):
    return a + b * x

# Seaborn conveniently provides the data for Anscombe's quartet.
df = sns.load_dataset("anscombe")
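To make the fit concrete, here is a short sketch that fits the line to one of the four datasets; the column names dataset, x, and y match seaborn's anscombe data, and the printout format is just illustrative.

# Fit y = a + b*x to dataset "I" of the quartet
subset = df[df["dataset"] == "I"]
popt, pcov = curve_fit(func, subset["x"], subset["y"])
print("dataset I: a = %.3f, b = %.3f" % (popt[0], popt[1]))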