Created December 4, 2020 08:44
Class to create a model object used for sequence modeling
# Imports assume TensorFlow 2.x Keras
from tensorflow import keras
from tensorflow.keras import losses
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.layers import Dense, Input, LSTM
from tensorflow.keras.models import Model


class NNMultistepModel():
    """Single-layer LSTM model for multi-step (n_outputs steps ahead) sequence forecasting."""

    def __init__(
        self,
        X,
        Y,
        n_outputs,
        n_lag,
        n_ft,
        n_layer,
        batch,
        epochs,
        lr,
        Xval=None,
        Yval=None,
        mask_value=-999.0,
        min_delta=0.001,
        patience=5
    ):
        # Series signal: each sample is a window of n_lag timesteps with n_ft features
        lstm_input = Input(shape=(n_lag, n_ft))
        lstm_layer = LSTM(n_layer, activation='relu')(lstm_input)
        x = Dense(n_outputs)(lstm_layer)

        self.model = Model(inputs=lstm_input, outputs=x)
        self.batch = batch
        self.epochs = epochs
        self.n_layer = n_layer
        self.lr = lr
        self.Xval = Xval
        self.Yval = Yval
        self.X = X
        self.Y = Y
        self.mask_value = mask_value
        self.min_delta = min_delta
        self.patience = patience

    def trainCallback(self):
        # Stop training when the loss improves by less than min_delta for `patience` epochs
        return EarlyStopping(monitor='loss', patience=self.patience, min_delta=self.min_delta)

    def train(self):
        # Getting the untrained model
        empty_model = self.model

        # Initiating the optimizer
        optimizer = keras.optimizers.Adam(learning_rate=self.lr)

        # Compiling the model
        empty_model.compile(loss=losses.MeanAbsoluteError(), optimizer=optimizer)

        if (self.Xval is not None) and (self.Yval is not None):
            history = empty_model.fit(
                self.X,
                self.Y,
                epochs=self.epochs,
                batch_size=self.batch,
                validation_data=(self.Xval, self.Yval),
                shuffle=False,
                callbacks=[self.trainCallback()]
            )
        else:
            history = empty_model.fit(
                self.X,
                self.Y,
                epochs=self.epochs,
                batch_size=self.batch,
                shuffle=False,
                callbacks=[self.trainCallback()]
            )

        # Saving the fitted model back to the model attribute of the class
        self.model = empty_model

        # Returning the training history
        return history

    def predict(self, X):
        return self.model.predict(X)
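
A minimal usage sketch (not part of the gist itself), with hypothetical random arrays standing in for real prepared data: X must be shaped (samples, n_lag, n_ft) and Y shaped (samples, n_outputs).

import numpy as np

# Hypothetical toy data: 100 windows of 60 lags with a single feature,
# each mapped to the next 10 values of the series
X = np.random.rand(100, 60, 1)
Y = np.random.rand(100, 10)

model = NNMultistepModel(
    X=X,
    Y=Y,
    n_outputs=10,
    n_lag=60,
    n_ft=1,
    n_layer=32,
    batch=16,
    epochs=5,
    lr=0.001
)
history = model.train()
forecast = model.predict(X[:1])  # array of shape (1, 10): ten steps ahead for one window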
Hi there, I have a question for you. If I want to use this class for an LSTM, should I first use your createXy.py (https://gist.github.com/Eligijus112/b28fb1dadf422139035c481017e7a71a) to prepare the data and then run this class on the prepared data?

Hello dear sfallahpour,
Yes, it is better to first use my function that prepares the input.

Thank you for the response. I am trying to use your create_X_Y function to prepare the data, and when I run it I get an error:

Tesla = pd.read_csv('TSLA.csv')
training_set = Tesla['Close'].values
create_X_Y(training_set, lag=60, n_ahead=10, target_index=0)
IndexError: tuple index out of range

It has something to do with n_features = ts.shape[1]. Can you elaborate on what n_features is for, and also what target_index=0 means?
Thank you.
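
A hedged note on the IndexError above, based only on the n_features = ts.shape[1] line quoted in the question: create_X_Y appears to expect a 2-D array of shape (timesteps, features), while Tesla['Close'].values is 1-D, so adding an explicit feature axis is one plausible fix, and target_index would then select which feature column is the forecast target. A minimal sketch under those assumptions:

import pandas as pd
# create_X_Y comes from the linked createXy.py gist; its exact return
# value is assumed here to be the lagged inputs X and multi-step targets Y

Tesla = pd.read_csv('TSLA.csv')

# Reshape the univariate Close series from (n,) to (n, 1) so that
# ts.shape[1] exists and equals 1 (an assumption about create_X_Y's input format)
training_set = Tesla['Close'].values.reshape(-1, 1)

# With a single feature column, target_index=0 points at that column
X, Y = create_X_Y(training_set, lag=60, n_ahead=10, target_index=0)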