Jan Schmitz (JanSchm)
from tensorflow.keras.layers import (Input, LSTM, Bidirectional, Dense, concatenate,
                                     GlobalAveragePooling1D, GlobalMaxPooling1D)
from tensorflow.keras.models import Model

def create_model():
    in_seq = Input(shape=(seq_len, 5))  # seq_len timesteps x 5 features (OHLCV)
    x = Bidirectional(LSTM(128, return_sequences=True))(in_seq)
    x = Bidirectional(LSTM(128, return_sequences=True))(x)
    x = Bidirectional(LSTM(64, return_sequences=True))(x)
    avg_pool = GlobalAveragePooling1D()(x)
    max_pool = GlobalMaxPooling1D()(x)
    conc = concatenate([avg_pool, max_pool])
    out = Dense(1, activation='linear')(conc)  # the gist truncates here; a single-value regression head is assumed
    model = Model(inputs=in_seq, outputs=out)
    model.compile(loss='mse', optimizer='adam')
    return model
model = create_model()
model.fit(X_train, y_train,
          batch_size=2048,
          verbose=2,
          callbacks=[callback],
          epochs=200,
          validation_data=(X_val, y_val))
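# `callback` is referenced above but never defined in the gist. A minimal
# sketch, assuming a ModelCheckpoint that keeps the best weights by validation
# loss (the filename is hypothetical):
import tensorflow as tf

callback = tf.keras.callbacks.ModelCheckpoint('model.hdf5',
                                              monitor='val_loss',
                                              save_best_only=True,
                                              verbose=1)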
# Convert raw OHLCV values to period-over-period percentage returns
df['Open'] = df['Open'].pct_change()
df['High'] = df['High'].pct_change()
df['Low'] = df['Low'].pct_change()
df['Close'] = df['Close'].pct_change()
df['Volume'] = df['Volume'].pct_change()
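# pct_change leaves a NaN in the first row; the scaling below assumes it has
# been dropped (a small cleanup step the gist does not show):
df.dropna(how='any', axis=0, inplace=True)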
# Normalize price columns: one shared min/max across Open/High/Low/Close keeps their relative scale intact
min_return = min(df[['Open', 'High', 'Low', 'Close']].min(axis=0))
max_return = max(df[['Open', 'High', 'Low', 'Close']].max(axis=0))
# Min-max normalize price columns (0-1 range)
df['Open'] = (df['Open'] - min_return) / (max_return - min_return)
df['High'] = (df['High'] - min_return) / (max_return - min_return)
df['Low'] = (df['Low'] - min_return) / (max_return - min_return)
df['Close'] = (df['Close'] - min_return) / (max_return - min_return)
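# Volume sits on a different scale than the price returns, so it would
# presumably get its own min-max pass; a sketch of that step, which the
# gist does not show:
min_volume = df['Volume'].min()
max_volume = df['Volume'].max()
df['Volume'] = (df['Volume'] - min_volume) / (max_volume - min_volume)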
times = sorted(df.index.values)
last_10pct = times[-int(0.1*len(times))]  # index value marking the last 10% of the series
last_20pct = times[-int(0.2*len(times))]  # index value marking the last 20% of the series

df_train = df[(df.index < last_20pct)]                           # first 80%: training
df_val = df[(df.index >= last_20pct) & (df.index < last_10pct)]  # next 10%: validation
df_test = df[(df.index >= last_10pct)]                           # final 10%: test
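# The training calls assume (X, y) pairs of shape (n, seq_len, 5) built from
# these frames. A minimal windowing sketch; the column order and the choice of
# next-step 'Close' as the target are assumptions, not shown in the gist:
import numpy as np

def make_windows(frame, seq_len):
    values = frame[['Open', 'High', 'Low', 'Close', 'Volume']].values
    X, y = [], []
    for i in range(seq_len, len(values)):
        X.append(values[i - seq_len:i])  # previous seq_len rows form one sample
        y.append(values[i, 3])           # next-step normalized 'Close' (assumed target)
    return np.array(X), np.array(y)

X_train, y_train = make_windows(df_train, seq_len)
X_val, y_val = make_windows(df_val, seq_len)
X_test, y_test = make_windows(df_test, seq_len)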
from tensorflow.keras.layers import Conv1D, BatchNormalization, ReLU, concatenate

def Inception_A(layer_in, c7):
    # 1x1 convolution branch
    branch1x1 = Conv1D(c7, kernel_size=1, padding='same', use_bias=False)(layer_in)
    branch1x1 = BatchNormalization()(branch1x1)
    branch1x1 = ReLU()(branch1x1)
    # 1x1 -> 5x5 convolution branch
    branch5x5 = Conv1D(c7, kernel_size=1, padding='same', use_bias=False)(layer_in)
    branch5x5 = BatchNormalization()(branch5x5)
    branch5x5 = ReLU()(branch5x5)
    branch5x5 = Conv1D(c7, kernel_size=5, padding='same', use_bias=False)(branch5x5)
    branch5x5 = BatchNormalization()(branch5x5)
    branch5x5 = ReLU()(branch5x5)
    # the gist truncates mid-function; merging the branches is an assumed completion
    return concatenate([branch1x1, branch5x5])
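# Inception_B and Inception_C (used by create_model below) are not included in
# this gist. A hypothetical variant along the same lines as Inception_A,
# swapping in a wider kernel; illustrative only, not the author's exact definition:
def Inception_B(layer_in, c7):
    branch1x1 = Conv1D(c7, kernel_size=1, padding='same', use_bias=False)(layer_in)
    branch1x1 = BatchNormalization()(branch1x1)
    branch1x1 = ReLU()(branch1x1)
    branch7x7 = Conv1D(c7, kernel_size=1, padding='same', use_bias=False)(layer_in)
    branch7x7 = BatchNormalization()(branch7x7)
    branch7x7 = ReLU()(branch7x7)
    branch7x7 = Conv1D(c7, kernel_size=7, padding='same', use_bias=False)(branch7x7)
    branch7x7 = BatchNormalization()(branch7x7)
    branch7x7 = ReLU()(branch7x7)
    return concatenate([branch1x1, branch7x7])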
def create_model():
    in_seq = Input(shape=(seq_len, 5))
    x = Inception_A(in_seq, 32)
    x = Inception_A(x, 32)
    x = Inception_B(x, 32)
    x = Inception_B(x, 32)
    x = Inception_C(x, 32)  # Inception_C follows the same branch-and-concatenate pattern
    x = Inception_C(x, 32)
    # the gist truncates here; a pooled regression head is an assumed completion
    out = Dense(1, activation='linear')(GlobalAveragePooling1D()(x))
    model = Model(inputs=in_seq, outputs=out)
    model.compile(loss='mse', optimizer='adam')
    return model
# Train the Inception-style model with the same settings as the Bi-LSTM above
model = create_model()
model.fit(X_train, y_train,
          batch_size=2048,
          verbose=2,
          callbacks=[callback],
          epochs=200,
          validation_data=(X_val, y_val))
import tensorflow as tf
from tensorflow.keras.layers import Layer

class Time2Vector(Layer):
    def __init__(self, seq_len, **kwargs):
        super(Time2Vector, self).__init__(**kwargs)  # forward kwargs to the Layer base class
        self.seq_len = seq_len

    def build(self, input_shape):
        # learnable weights of the linear ("trend") time component
        self.weights_linear = self.add_weight(name='weight_linear',
                                              shape=(int(self.seq_len),),
                                              initializer='uniform',
                                              trainable=True)
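        # The matching bias and periodic parameters would be built the same
        # way (a sketch of the truncated remainder):
        self.bias_linear = self.add_weight(name='bias_linear',
                                           shape=(int(self.seq_len),),
                                           initializer='uniform', trainable=True)
        self.weights_periodic = self.add_weight(name='weight_periodic',
                                                shape=(int(self.seq_len),),
                                                initializer='uniform', trainable=True)
        self.bias_periodic = self.add_weight(name='bias_periodic',
                                             shape=(int(self.seq_len),),
                                             initializer='uniform', trainable=True)

    # A sketch of the forward pass, following the Time2Vec formulation: one
    # linear and one sin-activated periodic time feature per timestep.
    # Averaging the four price columns into a scalar per step is an assumption.
    def call(self, x):
        x = tf.math.reduce_mean(x[:, :, :4], axis=-1)  # (batch, seq_len)
        time_linear = tf.expand_dims(self.weights_linear * x + self.bias_linear, axis=-1)
        time_periodic = tf.expand_dims(
            tf.math.sin(self.weights_periodic * x + self.bias_periodic), axis=-1)
        return tf.concat([time_linear, time_periodic], axis=-1)  # (batch, seq_len, 2)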
from tensorflow.keras.layers import Layer, Dense

class SingleAttention(Layer):
    def __init__(self, d_k, d_v):
        super(SingleAttention, self).__init__()
        self.d_k = d_k  # query/key projection width
        self.d_v = d_v  # value projection width

    def build(self, input_shape):
        self.query = Dense(self.d_k, input_shape=input_shape,
                           kernel_initializer='glorot_uniform', bias_initializer='glorot_uniform')
        self.key = Dense(self.d_k, input_shape=input_shape,
                         kernel_initializer='glorot_uniform', bias_initializer='glorot_uniform')
        self.value = Dense(self.d_v, input_shape=input_shape,
                           kernel_initializer='glorot_uniform', bias_initializer='glorot_uniform')
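    # A sketch of the truncated forward pass: standard scaled dot-product
    # attention, softmax(Q K^T / sqrt(d_k)) V. Taking a (query, key, value)
    # triple of inputs is an assumption about the missing code.
    def call(self, inputs):
        q = self.query(inputs[0])
        k = self.key(inputs[1])
        v = self.value(inputs[2])
        scores = tf.matmul(q, k, transpose_b=True)
        scores = scores / tf.math.sqrt(tf.cast(self.d_k, tf.float32))
        weights = tf.nn.softmax(scores, axis=-1)
        return tf.matmul(weights, v)  # (batch, seq_len, d_v)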