@bryan-lunt (created February 15, 2016)
Test Keras.io reverse recurrences
# coding: utf-8
# In[1]:
get_ipython().magic(u'matplotlib inline')
# In[2]:
from __future__ import print_function
import scipy as S
# In[3]:
### SETTINGS ###
try_it_backwards = False  # unused in this script
N_EXAMPLES = 20      # number of training examples
N_CORRUPTIONS = 15   # each example is fed as this many noise-corrupted copies of the input (stacked as features), not the clean input itself
CORRUPTION_STD = 0.1 # std of the Gaussian noise added to the inputs to create the corrupted inputs
N_TRAIN_EPOCHS = 100
N_BATCH_SIZE = 5
N_NEURONS = 10
# In[4]:
#
# Create input
#
# This section creates the input and target curves. (The cell that displays
# them is disabled below; the same curves are plotted again at the end.)
#
def unit_impulse(x):
    return S.array(S.logical_and(-1 <= x, x <= 1), dtype=S.float_)

def smooth_impulse(x):
    return S.real(S.exp(-S.power(x, 2.0)))
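# Quick sanity check (a sketch): smooth_impulse peaks at 1.0 at x = 0 and
# decays toward 0, while unit_impulse is 1.0 on [-1, 1] and 0.0 elsewhere.
assert smooth_impulse(S.array([0.0]))[0] == 1.0
assert unit_impulse(S.array([0.0, 2.0])).tolist() == [1.0, 0.0]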
SPACINGS = S.linspace(-10.0, 10.0, N_EXAMPLES)
xs = S.array([S.linspace(-10.0 + i, 10.0 + i, 200) for i in SPACINGS])
y = smooth_impulse(xs)  # uncorrupted network inputs
z = S.tanh(xs)          # regression targets
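# Shape check (a sketch): one row per example, 200 time points per curve.
assert xs.shape == (N_EXAMPLES, 200)
assert y.shape == xs.shape and z.shape == xs.shape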
# In[5]:
#The following code is not needed, since the same curves are plotted at the
#end. Kept here in case someone wants to fiddle with the inputs.
_ = """
import pylab

pylab.figure()
pylab.title("True output curves")
for i in range(N_EXAMPLES):
    pylab.plot(z[i])

pylab.figure()
pylab.title("Uncorrupted input curves")
for i in range(N_EXAMPLES):
    pylab.plot(y[i])
"""
# In[6]:
#Create the corrupted training data with appropriate shapes for Keras
trainX = S.dstack([y+CORRUPTION_STD*S.randn(*y.shape) for i in range(N_CORRUPTIONS)])
trainY = S.dstack([z])
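# Note: dstack stacks the corrupted copies along a third axis, yielding the 3D
# (samples, timesteps, features) layout that Keras recurrent layers expect:
# trainX is (N_EXAMPLES, 200, N_CORRUPTIONS) and trainY is (N_EXAMPLES, 200, 1).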
# In[7]:
print("trainX.shape: ", trainX.shape)
print("trainY.shape: ", trainY.shape)
# # Set up the basic LSTM models
# In[8]:
from keras.models import Sequential
from keras.layers.core import Activation, Dense, TimeDistributedDense, Dropout
from keras.layers.recurrent import LSTM, SimpleRNN
from keras.layers.convolutional import Convolution1D, MaxPooling1D
from keras.regularizers import l2, activity_l2
from keras.optimizers import SGD
#FORWARD
print("Begin creating forward model")
fwmodel = Sequential()
fwmodel.add(LSTM(N_NEURONS, activation="tanh",
                 return_sequences=True,
                 go_backwards=False,
                 input_shape=trainX[0].shape))
fwmodel.add(TimeDistributedDense(1, activation="tanh"))
print("compiling forward model")
fwmodel.compile(loss="mean_squared_error", optimizer="sgd")
#BACKWARDS
print("Begin creating backward model")
bwmodel = Sequential()
bwmodel.add(LSTM(N_NEURONS, activation="tanh",
                 return_sequences=True,
                 go_backwards=True,
                 input_shape=trainX[0].shape))
bwmodel.add(TimeDistributedDense(1, activation="tanh"))
print("Compiling backward model")
bwmodel.compile(loss="mean_squared_error", optimizer="sgd")
print("Done creating models")
# In[9]:
#TRAIN
print("Training forward")
fw_history = fwmodel.fit(trainX, trainY, nb_epoch=N_TRAIN_EPOCHS, batch_size=N_BATCH_SIZE, verbose=False)
print("Training backward")
bw_history = bwmodel.fit(trainX, trainY, nb_epoch=N_TRAIN_EPOCHS, batch_size=N_BATCH_SIZE, verbose=False)
print("Done training")
# In[10]:
#Display everything
fw_yprimes = fwmodel.predict(trainX)
bw_yprimes = bwmodel.predict(trainX)
import pylab
pylab.figure()
pylab.title("Training Losses")
pylab.plot(fw_history.history['loss'],label="FW loss")
pylab.plot(bw_history.history['loss'],label="BW loss")
pylab.legend()
pylab.figure()
pylab.title("Uncorrupted impulses")
for i in range(N_EXAMPLES):
    pylab.plot(y[i])

pylab.figure()
pylab.title("True curves")
for i in range(N_EXAMPLES):
    pylab.plot(z[i])
fig = pylab.figure(figsize=(15,5))
pylab.title("Predicted curves")
ax1 = fig.add_subplot(1, 2, 1)
ax1.set_title("fw")
for i in range(N_EXAMPLES):
    ax1.plot(fw_yprimes[i])
ax2 = fig.add_subplot(1, 2, 2)
ax2.set_title("bw")
for i in range(N_EXAMPLES):
    ax2.plot(bw_yprimes[i])
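# In[11]:
# One more probe (a sketch, with fw_on_reversed introduced purely for
# illustration): if go_backwards only reverses the order in which timesteps
# are consumed, the forward model fed a time-reversed input should produce
# curves resembling the backward model's output, possibly up to another time
# reversal. Treat the comparison as qualitative only, since the two models
# have independently trained weights.
fw_on_reversed = fwmodel.predict(trainX[:, ::-1, :])
fig2 = pylab.figure(figsize=(15, 5))
ax1 = fig2.add_subplot(1, 2, 1)
ax1.set_title("fw on time-reversed input")
for i in range(N_EXAMPLES):
    ax1.plot(fw_on_reversed[i])
ax2 = fig2.add_subplot(1, 2, 2)
ax2.set_title("bw predictions, time-reversed")
for i in range(N_EXAMPLES):
    ax2.plot(bw_yprimes[i, ::-1])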