Kung-Hsiang Steeve Huang (khuangaf)

:octocat:
Focusing
View GitHub Profile
import pandas as pd
import matplotlib.pyplot as plt

# Assemble ground-truth and prediction DataFrames, keep only the months after
# July 2017, and plot actual prices against the model's point predictions.
ground_true_df = pd.DataFrame()
ground_true_df['times'] = ground_true_times
ground_true_df['value'] = ground_true
prediction_df = pd.DataFrame()
prediction_df['times'] = validation_output_times
prediction_df['value'] = predicted_inverted

prediction_df = prediction_df.loc[(prediction_df['times'].dt.year == 2017) & (prediction_df['times'].dt.month > 7), :]
ground_true_df = ground_true_df.loc[(ground_true_df['times'].dt.year == 2017) & (ground_true_df['times'].dt.month > 7), :]

plt.figure(figsize=(20, 10))
plt.plot(ground_true_df.times, ground_true_df.value, label='Actual')
plt.plot(prediction_df.times, prediction_df.value, 'ro', label='Predicted')
plt.legend(loc='upper left')
plt.show()
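For context, `predicted_inverted` and `ground_true` are produced earlier in the notebook. A minimal sketch of the inverse-scaling step, assuming a scikit-learn MinMaxScaler was fit on the unscaled close prices (the scaler and the 1-D shape of `original_datas` are assumptions, not shown in this gist):

from sklearn.preprocessing import MinMaxScaler

# Assumed: `original_datas` holds the unscaled close prices, and the same
# scaling was applied to the training targets.
scaler = MinMaxScaler()
scaler.fit(original_datas.reshape(-1, 1))

# Map the scaled network outputs back to price units for plotting.
predicted_inverted = scaler.inverse_transform(predicted.reshape(-1, 1)).reshape(-1)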
from keras.models import Sequential
from keras.layers import Activation, Dense, Dropout, CuDNNLSTM, LeakyReLU

def fit_lstm(reg):
    global training_datas, training_labels, batch_size, epochs, step_size, nb_features, units, output_size
    # Single CuDNN-accelerated LSTM layer whose bias regularizer is the `reg` under test.
    model = Sequential()
    model.add(CuDNNLSTM(units=units, bias_regularizer=reg, input_shape=(step_size, nb_features), return_sequences=False))
    model.add(Activation('tanh'))
    model.add(Dropout(0.2))
    model.add(Dense(output_size))
    model.add(LeakyReLU())
    model.compile(loss='mse', optimizer='adam')
    model.fit(training_datas, training_labels, batch_size=batch_size, epochs=epochs, verbose=0)
    return model
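A brief usage sketch, assuming `reg` is one of Keras' built-in weight regularizers (the particular strengths below are illustrative, not taken from the gist):

from keras import regularizers

# Train one model per candidate bias regularizer.
for reg in [regularizers.l1(0.005), regularizers.l2(0.005), regularizers.l1_l2(l1=0.005, l2=0.005)]:
    model = fit_lstm(reg)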
def experiment(validation_datas, validation_labels, original_datas, ground_true, ground_true_times,
               validation_original_outputs, validation_output_times, nb_repeat, reg):
    error_scores = list()
    # Get only the close data and convert the UNIX timestamps into datetimes.
    ground_true = ground_true[:, :, 0].reshape(-1)
    ground_true_times = ground_true_times.reshape(-1)
    ground_true_times = pd.to_datetime(ground_true_times, unit='s')
    validation_output_times = pd.to_datetime(validation_output_times.reshape(-1), unit='s')
    # Retrain nb_repeat times so the reported error is not tied to a single random initialisation.
    for i in range(nb_repeat):
        model = fit_lstm(reg)
        predicted = model.predict(validation_datas)
        # score `predicted` against the validation labels and append to error_scores
        # (a plausible completion is sketched below)

# `results` is a DataFrame with one column of repeated error scores per regularizer.
results.describe()
results.boxplot()
plt.show()
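A hedged sketch of how the truncated loop and the `results` DataFrame fit together: the loop is assumed to finish by scoring `predicted` against the validation labels (RMSE is one reasonable choice; the preview does not show the metric), and `results` collects one column of repeated scores per regularizer. Names and settings below are illustrative.

import numpy as np
import pandas as pd
from keras import regularizers
from sklearn.metrics import mean_squared_error

# Plausible tail of experiment()'s loop (not shown in the preview):
#     error = np.sqrt(mean_squared_error(validation_labels.reshape(-1), predicted.reshape(-1)))
#     error_scores.append(error)
# followed by `return error_scores`.

# One column of repeated scores per regularizer yields the `results` frame summarised above.
regs = {'l1': regularizers.l1(0.005), 'l2': regularizers.l2(0.005)}
results = pd.DataFrame({name: experiment(validation_datas, validation_labels, original_datas,
                                         ground_true, ground_true_times,
                                         validation_original_outputs, validation_output_times,
                                         nb_repeat=30, reg=reg)
                        for name, reg in regs.items()})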
class DataSrc(object):
    """Acts as data provider for each new episode."""

    def __init__(self, df, steps=252, scale=True, scale_extra_cols=True, augment=0.00, window_length=50):
        """
        DataSrc.

        df - data frame with an index of timestamps and multi-index columns
             levels=[['LTCBTC', ...], ['open', 'low', 'high', 'close', ...]];
             an example is included as an hdf file in this repository
        steps - total steps in episode
        """
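A hedged sketch of the DataFrame layout the docstring describes: a timestamp index and two-level columns of (pair, OHLC field). The pairs and random values below are illustrative, not the repository's HDF file.

import numpy as np
import pandas as pd

index = pd.date_range('2017-08-01', periods=400, freq='30T')
columns = pd.MultiIndex.from_product([['LTCBTC', 'ETHBTC'],
                                      ['open', 'low', 'high', 'close']])
df = pd.DataFrame(np.random.rand(len(index), len(columns)), index=index, columns=columns)

# df['LTCBTC']['close'] is the close series for one pair; DataSrc slides a
# window_length-step window over this table for each episode.
src = DataSrc(df, steps=252, window_length=50)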
class DataSrc(object):
    def _step(self):
        # Get the trailing window_length-step history matrix from the data cube.
        data_window = self.data[:, self.step:self.step + self.window_length].copy()

        # (eq. 1) price relatives: latest close / previous close for each asset.
        y1 = data_window[:, -1, 0] / data_window[:, -2, 0]
        y1 = np.concatenate([[1.0], y1])  # add cash price, which is always 1.0
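A small numeric illustration of the (eq. 1) price-relative vector: each asset's latest close divided by its previous close, with 1.0 prepended for the cash asset.

import numpy as np

# Two assets; the window's last two closes are [10.0, 11.0] and [200.0, 190.0].
data_window = np.array([[[10.0], [11.0]],
                        [[200.0], [190.0]]])        # shape (assets, time, features)
y1 = data_window[:, -1, 0] / data_window[:, -2, 0]  # -> array([1.10, 0.95])
y1 = np.concatenate([[1.0], y1])                    # -> array([1.0, 1.10, 0.95])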
class PortfolioSim(object):
    def _step(self, w1, y1):
        """
        Step.

        w1 - new action of portfolio weights, e.g. [0.1, 0.9, 0.0]
        y1 - price relative vector (also called returns),
             e.g. [1.0, 0.9, 1.1]
        Numbered equations are from https://arxiv.org/abs/1706.10059
        """
        w0 = self.w0
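A hedged illustration of how w1 and y1 combine: ignoring transaction costs, the dot product of the price-relative vector with the chosen weights gives the gross growth factor of the portfolio over one step. The cost handling in the full _step follows the paper's numbered equations and is not shown in this preview.

import numpy as np

w1 = np.array([0.1, 0.9, 0.0])   # cash, asset A, asset B
y1 = np.array([1.0, 0.9, 1.1])   # price relatives for the same assets
growth = np.dot(y1, w1)          # 0.1*1.0 + 0.9*0.9 + 0.0*1.1 = 0.91
log_return = np.log(growth)      # this family of environments typically rewards the log return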
class PortfolioEnv(gym.Env):
    def _step(self, action):
        """
        Step the env.

        Actions should be a portfolio weight vector [w0, ...]
        - wn is a portfolio weight between 0 and 1; the first entry (w0) is the cash bias
        - cn is the portfolio conversion weights; see PortfolioSim._step for a description
        """
        logger.debug('action: %s', action)
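A usage sketch under the old gym API this code targets (gym wraps `_step` as the public `step`); the constructor arguments and the three-asset action are assumptions for illustration.

import numpy as np

env = PortfolioEnv(df=df, steps=252, window_length=50)  # assumed constructor signature
observation = env.reset()
action = np.array([0.2, 0.5, 0.3])   # w0 (cash) first; weights should sum to 1
observation, reward, done, info = env.step(action)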
import gym.spaces
import gym.wrappers
import numpy as np


def concat_states(state):
    # Flatten the dict observation {history, weights} into a single array by
    # appending the current portfolio weights to the price-history tensor.
    history = state["history"]
    weights = state["weights"]
    weight_insert_shape = (history.shape[0], 1, history.shape[2])
    # When weights include the cash asset, there is one more weight than assets in history.
    if len(weights) - 1 == history.shape[0]:
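The preview cuts off inside the `if`. A hedged sketch of a plausible completion, which drops the cash weight, broadcasts the remaining weights to the insert shape, and appends them to the history along the time axis (the exact layout is an assumption, so the sketch uses its own name):

def concat_states_sketch(state):
    history = state["history"]
    weights = state["weights"]
    weight_insert_shape = (history.shape[0], 1, history.shape[2])
    if len(weights) - 1 == history.shape[0]:
        weights = weights[1:]  # drop the cash weight so weights line up with the assets
    weights = np.broadcast_to(weights[:, None, None], weight_insert_shape)
    # Final observation: price history with the current weights appended as one extra time step.
    return np.concatenate([history, weights], axis=1)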