model.compile(loss="sparse_categorical_crossentropy",
              optimizer="sgd",  # stochastic gradient descent
              metrics=["accuracy"])
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

sns.set_theme()
# Plot the per-epoch loss and accuracy curves recorded during training
pd.DataFrame(history.history).plot(figsize=(8, 5))
plt.grid(True)
plt.gca().set_ylim(0, 1)
plt.show()
from tensorflow import keras

input_ = keras.layers.Input(shape=X_train.shape[1:])
hidden1 = keras.layers.Dense(30, activation="relu")(input_)
hidden2 = keras.layers.Dense(30, activation="relu")(hidden1)
concat = keras.layers.Concatenate()([input_, hidden2])  # skip connection: wide path
output = keras.layers.Dense(1)(concat)
model = keras.Model(inputs=[input_], outputs=[output])
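
# A hedged sketch (not in the original gist): compiling and training this
# single-input wide & deep regression model, assuming the same
# X_train/y_train and X_valid/y_valid splits as above:
model.compile(loss="mse", optimizer=keras.optimizers.SGD(learning_rate=1e-3))
history = model.fit(X_train, y_train, epochs=20,
                    validation_data=(X_valid, y_valid))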
input_A = keras.layers.Input(shape=[5], name="wide_input")
input_B = keras.layers.Input(shape=[6], name="deep_input")
hidden1 = keras.layers.Dense(30, activation="relu")(input_B)  # deep path
hidden2 = keras.layers.Dense(30, activation="relu")(hidden1)
concat = keras.layers.Concatenate()([input_A, hidden2])  # wide path joins here
output = keras.layers.Dense(1, name="output")(concat)
model = keras.Model(inputs=[input_A, input_B], outputs=[output])
model.compile(loss="mse", optimizer=keras.optimizers.SGD(learning_rate=1e-3))
# Splitting the train, validation and test data
# into different sets of features for the two inputs
# (note that features 2-4 are fed to both inputs)
X_train_A, X_train_B = X_train[:, :5], X_train[:, 2:]
X_valid_A, X_valid_B = X_valid[:, :5], X_valid[:, 2:]
X_test_A, X_test_B = X_test[:, :5], X_test[:, 2:]
X_new_A, X_new_B = X_test_A[:3], X_test_B[:3]  # pretend these are new, unseen samples
# Passing a dict that maps each named input layer to its data;
# validation_data is a (inputs, labels) tuple
history = model.fit({"wide_input": X_train_A,
                     "deep_input": X_train_B},
                    y_train,
                    epochs=20,
                    validation_data=({"wide_input": X_valid_A,
                                      "deep_input": X_valid_B}, y_valid))
[...] # Same as above, up to main output layer
output = keras.layers.Dense(1, name="main_output")(concat)
aux_output = keras.layers.Dense(1, name="aux_output")(hidden2)
model = keras.Model(inputs=[input_A, input_B],
outputs=[output, aux_output])
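# Weight the main output's loss heavily; the auxiliary loss only regularizes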
model.compile(loss=["mse", "mse"], loss_weights=[0.9, 0.1], optimizer="sgd")
# Pass the same labels (y_test) for both outputs, since the
# auxiliary output is just for regularization
total_loss, main_loss, aux_loss = model.evaluate([X_test_A, X_test_B],
                                                 [y_test, y_test])
y_pred_main, y_pred_aux = model.predict([X_new_A, X_new_B])
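
# A hedged alternative sketch (not in the original gist): losses and loss
# weights can also be given as dicts keyed by output-layer name, which is
# less error-prone when a model has many outputs:
model.compile(loss={"main_output": "mse", "aux_output": "mse"},
              loss_weights={"main_output": 0.9, "aux_output": 0.1},
              optimizer="sgd")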