Created
August 17, 2020 14:46
-
-
Save netsatsawat/19f922b838159195c5e80d08298e76e6 to your computer and use it in GitHub Desktop.
Example snippet of a stacked-LSTM time-series regression model implemented with TensorFlow/Keras
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Stacked-LSTM regression model: four 32-unit LSTM layers with dropout,
# ending in a single-unit Dense head (one scalar prediction per window).
# Assumes inputs are windows of shape (n_steps, 1) — i.e. a univariate
# series split into fixed-length steps; `n_steps`, `X_train`, `y_train`
# are defined elsewhere in the notebook/script.
lstm_model = tf.keras.Sequential([
    layers.LSTM(32, return_sequences=True, input_shape=(n_steps, 1)),
    layers.LSTM(32, return_sequences=True),
    layers.Dropout(0.2),
    layers.LSTM(32, return_sequences=True),
    layers.LSTM(32),  # final LSTM emits only the last hidden state
    layers.Dropout(0.2),
    layers.Dense(1)
])

# MSE loss for regression; track MAE and MSE during training.
lstm_model.compile(optimizer=tf.keras.optimizers.Adam(),
                   loss=tf.losses.MeanSquaredError(),
                   metrics=['mae', 'mse'])

# FIX: add restore_best_weights=True. The Keras default is False, so
# training would stop 60 epochs past the best val_loss yet the model
# would keep the *last* (worse) weights instead of the best ones.
early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss',
                                                  patience=60,
                                                  mode='min',
                                                  restore_best_weights=True)

# Train silently (verbose=0); EpochDots prints a compact progress dot
# per epoch. 20% of the training data is held out for validation.
lstm_hist = lstm_model.fit(
    X_train, y_train, epochs=500, validation_split=0.2,
    batch_size=32, verbose=0,
    callbacks=[tfdocs.modeling.EpochDots(), early_stopping]
)
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment