@habedi
Created October 15, 2022
Example Python code for performing multi-output regression with Keras and TensorFlow
# Download the 'train_embeddings.pkl' from https://files.fm/f/76h5d4229
# Run the code in the folder where 'train_embeddings.pkl' is located.
import compress_pickle as pickle
import numpy as np
from keras import Sequential
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from keras.layers import Dense
from keras.optimizers import Adam
from matplotlib import pyplot as plt
from sklearn.model_selection import train_test_split
# Model parameters
learning_rate = 1e-3
epochs = 20
patience = 3
batch_size = 32
# compress_pickle infers the compression from the file extension when given
# a path, so the file can be loaded directly by name.
train_data = pickle.load("train_embeddings.pkl")
X = np.array(train_data["embedding"])  # one embedding vector per sample
# y = np.array([[i[0]] for i in train_data["labels"]])  # single-output variant
y = np.array(train_data["labels"])  # one row of target values per sample
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=60)
print(X_train.shape, y_train.shape)
print(X_test.shape, y_test.shape)
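# Optional sanity check (a sketch; the exact sizes depend on the pickle
# contents, so only the rank is checked): multi-output regression expects
# 2-D targets, one column per target variable.
assert X_train.ndim == 2 and y_train.ndim == 2
print(f"Predicting {y_train.shape[1]} targets from {X_train.shape[1]}-dimensional embeddings")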
model = Sequential([
    Dense(20, input_dim=X_train.shape[1], kernel_initializer="he_uniform", activation="relu"),
    Dense(128, activation="relu"),
    Dense(128, activation="relu"),
    # One linear unit per regression target; no activation keeps the
    # outputs unbounded, as is usual for regression.
    Dense(y.shape[1], activation=None),
])
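# Alternative (a sketch only, not used below): the functional API supports
# one output head per target, which allows per-target losses and loss weights.
# from keras import Input, Model
# inputs = Input(shape=(X_train.shape[1],))
# hidden = Dense(128, activation="relu")(Dense(20, activation="relu")(inputs))
# heads = [Dense(1, activation=None, name=f"target_{i}")(hidden) for i in range(y.shape[1])]
# multi_head_model = Model(inputs=inputs, outputs=heads)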
callbacks = [
    # Keep the weights with the lowest validation loss seen so far.
    ModelCheckpoint(filepath="best_model.keras", save_best_only=True, monitor="val_loss"),
    # Cut the learning rate by 10x when the validation loss plateaus.
    ReduceLROnPlateau(monitor="val_loss", factor=1e-1, patience=patience, min_lr=1e-5),
    # Stop training altogether if the validation loss still does not improve.
    EarlyStopping(monitor="val_loss", patience=patience),
]
optimizer = Adam(learning_rate=learning_rate)
# Alternatives (import them from keras.optimizers first):
# optimizer = RMSprop(learning_rate=learning_rate)
# optimizer = SGD(learning_rate=learning_rate)
model.compile(loss="mae", optimizer=optimizer)  # mean absolute error over all outputs
model.summary()  # prints the summary itself; no need to wrap it in print()
history = model.fit(
    x=X_train,
    y=y_train,
    validation_data=(X_test, y_test),
    epochs=epochs,
    batch_size=batch_size,
    callbacks=callbacks,
)
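# Optional: reload the checkpointed weights (the epoch with the lowest
# validation loss) and report the held-out MAE. A sketch; it assumes the
# checkpoint written by ModelCheckpoint above exists at "best_model.keras".
from keras.models import load_model

best_model = load_model("best_model.keras")
print("Best validation MAE:", best_model.evaluate(X_test, y_test, verbose=0))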
loss = history.history["loss"]
val_loss = history.history["val_loss"]
epochs_ = range(1, len(loss) + 1)
plt.figure()
plt.plot(epochs_, loss, "b", label="Training loss")
plt.plot(epochs_, val_loss, "r", label="Validation loss")
plt.title("Training and validation loss")
plt.xlabel("epoch")
plt.ylabel("loss")
plt.legend()
plt.show()
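# Optional: run the trained model on a few held-out samples. Predictions
# come back with shape (n_samples, n_targets), one column per output.
preds = model.predict(X_test[:5])
print(preds.shape)
print(preds)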