import numpy as np
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense
from keras.callbacks import EarlyStopping, ModelCheckpoint
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
# Convert a Pandas dataframe to the x, y inputs that TensorFlow needs.
def to_xy(df, target):
    result = [x for x in df.columns if x != target]
    # Find out the type of the target column. Is it really this hard? :(
    target_type = df[target].dtypes
    target_type = target_type[0] if hasattr(target_type, '__iter__') else target_type
    # Encode to int for classification, float otherwise. TensorFlow likes 32 bits.
    if target_type in (np.int64, np.int32):
        # Classification: one-hot encode the target.
        dummies = pd.get_dummies(df[target])
        return df[result].values.astype(np.float32), dummies.values.astype(np.float32)
    else:
        # Regression: keep the target as a float column.
        return df[result].values.astype(np.float32), df[[target]].values.astype(np.float32)
# Encode text values to integer indexes (i.e. 0, 1, 2 for red, green, blue).
def encode_text_index(df, name):
    le = preprocessing.LabelEncoder()
    df[name] = le.fit_transform(df[name])
    return le.classes_
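# `df` is assumed to have been loaded before this point; a minimal sketch
# (the file name 'data.csv' is hypothetical, not part of the original gist):
# df = pd.read_csv('data.csv')
# encode_text_index(df, 'Actual')  # map string labels to integer codes first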
X,y = to_xy(df,"Actual")
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20)
# Five hidden layers of 80 ReLU units, with a softmax output sized to the
# number of one-hot encoded classes.
model = Sequential()
model.add(Dense(80, activation='relu', input_dim=X_train.shape[1]))
model.add(Dense(80, activation='relu'))
model.add(Dense(80, activation='relu'))
model.add(Dense(80, activation='relu'))
model.add(Dense(80, activation='relu'))
model.add(Dense(y_train.shape[1], activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam')
# Stop when val_loss hasn't improved by at least 1e-2 for 25 epochs, and
# checkpoint the best weights seen so far.
monitor = EarlyStopping(monitor='val_loss', min_delta=1e-2, patience=25, verbose=1, mode='auto')
checkpointer = ModelCheckpoint(filepath="best_weights.hdf5", verbose=0, save_best_only=True)  # save best model
model.fit(X_train, y_train, validation_data=(X_test, y_test), callbacks=[monitor, checkpointer], verbose=2, epochs=1000)
# EarlyStopping leaves the model at its last (possibly worse) epoch, so
# reload the checkpointed best weights before predicting.
model.load_weights('best_weights.hdf5')
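# Newer Keras versions can restore the best weights from the callback itself,
# without a checkpoint file; a sketch, assuming a version that supports it:
# monitor = EarlyStopping(monitor='val_loss', min_delta=1e-2, patience=25,
#                         restore_best_weights=True)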
# Train on 2362 samples, validate on 591 samples
# Epoch 1/1000
# - 3s - loss: 4.0986 - val_loss: 3.2790
# Epoch 2/1000
# - 0s - loss: 1.9809 - val_loss: 0.4511
# Epoch 3/1000
# - 0s - loss: 0.8889 - val_loss: 0.3435
# Epoch 4/1000
# - 0s - loss: 0.8331 - val_loss: 0.4944
# Epoch 5/1000
# - 0s - loss: 0.7122 - val_loss: 2.7115
# Epoch 6/1000
# - 0s - loss: 2.3070 - val_loss: 2.1721
# Epoch 7/1000
# - 0s - loss: 0.7217 - val_loss: 0.3364
# Epoch 8/1000
# - 0s - loss: 0.4693 - val_loss: 0.2795
# Epoch 9/1000
# - 0s - loss: 1.1468 - val_loss: 0.2923
# Epoch 10/1000
# - 0s - loss: 0.3399 - val_loss: 0.2821
# Epoch 11/1000
# - 0s - loss: 0.6785 - val_loss: 0.6393
# Epoch 12/1000
# - 0s - loss: 0.4028 - val_loss: 0.3180
# Epoch 13/1000
# - 0s - loss: 0.3777 - val_loss: 0.9051
# Epoch 14/1000
# - 0s - loss: 0.3900 - val_loss: 0.2362
# Epoch 15/1000
# - 0s - loss: 0.3052 - val_loss: 0.3325
# Epoch 16/1000
# - 0s - loss: 0.1849 - val_loss: 0.5999
# Epoch 17/1000
# - 0s - loss: 0.3221 - val_loss: 0.1700
# Epoch 18/1000
# - 0s - loss: 0.3261 - val_loss: 0.5254
# Epoch 19/1000
# - 0s - loss: 0.3268 - val_loss: 0.2400
# Epoch 20/1000
# - 0s - loss: 0.1985 - val_loss: 0.1696
# Epoch 21/1000
# - 0s - loss: 0.3524 - val_loss: 0.1792
# Epoch 22/1000
# - 0s - loss: 0.3941 - val_loss: 0.2260
# Epoch 23/1000
# - 0s - loss: 0.1884 - val_loss: 0.1476
# Epoch 24/1000
# - 0s - loss: 0.1158 - val_loss: 0.1179
# Epoch 25/1000
# - 0s - loss: 0.2895 - val_loss: 0.1734
# Epoch 26/1000
# - 0s - loss: 0.2313 - val_loss: 0.1787
# Epoch 27/1000
# - 0s - loss: 0.3110 - val_loss: 0.1681
# Epoch 28/1000
# - 1s - loss: 0.1183 - val_loss: 0.1103
# Epoch 29/1000
# - 1s - loss: 0.1586 - val_loss: 0.1469
# Epoch 30/1000
# - 0s - loss: 0.0938 - val_loss: 0.1351
# Epoch 31/1000
# - 0s - loss: 0.1677 - val_loss: 0.1374
# Epoch 32/1000
# - 1s - loss: 0.1437 - val_loss: 0.1216
# Epoch 33/1000
# - 1s - loss: 0.8587 - val_loss: 0.1599
# Epoch 34/1000
# - 1s - loss: 0.1596 - val_loss: 0.1297
# Epoch 35/1000
# - 1s - loss: 0.3959 - val_loss: 0.1348
# Epoch 36/1000
# - 0s - loss: 0.2298 - val_loss: 0.2588
# Epoch 37/1000
# - 0s - loss: 0.1740 - val_loss: 0.2553
# Epoch 38/1000
# - 0s - loss: 0.1085 - val_loss: 0.1412
# Epoch 39/1000
# - 1s - loss: 0.0834 - val_loss: 0.0953
# Epoch 40/1000
# - 0s - loss: 0.1084 - val_loss: 0.1288
# Epoch 41/1000
# - 0s - loss: 0.0784 - val_loss: 0.1193
# Epoch 42/1000
# - 0s - loss: 0.0904 - val_loss: 0.1151
# Epoch 43/1000
# - 0s - loss: 0.2667 - val_loss: 0.3757
# Epoch 44/1000
# - 0s - loss: 0.1796 - val_loss: 0.1106
# Epoch 45/1000
# - 1s - loss: 0.1933 - val_loss: 0.3802
# Epoch 46/1000
# - 0s - loss: 0.2617 - val_loss: 0.3527
# Epoch 47/1000
# - 1s - loss: 0.1542 - val_loss: 0.4486
# Epoch 48/1000
# - 0s - loss: 0.1990 - val_loss: 0.1225
# Epoch 49/1000
# - 1s - loss: 0.7622 - val_loss: 0.2424
# Epoch 50/1000
# - 0s - loss: 0.2389 - val_loss: 0.1226
# Epoch 51/1000
# - 0s - loss: 0.0747 - val_loss: 0.0958
# Epoch 52/1000
# - 1s - loss: 0.1290 - val_loss: 0.2079
# Epoch 53/1000
# - 0s - loss: 0.0889 - val_loss: 0.1068
# Epoch 54/1000
# - 1s - loss: 0.0584 - val_loss: 0.0944
# Epoch 55/1000
# - 1s - loss: 0.2151 - val_loss: 0.1194
# Epoch 56/1000
# - 1s - loss: 0.1633 - val_loss: 0.0893
# Epoch 57/1000
# - 1s - loss: 0.0686 - val_loss: 0.0960
# Epoch 58/1000
# - 0s - loss: 0.0607 - val_loss: 0.0884
# Epoch 59/1000
# - 1s - loss: 0.0651 - val_loss: 0.1915
# Epoch 60/1000
# - 0s - loss: 0.0562 - val_loss: 0.0995
# Epoch 61/1000
# - 1s - loss: 1.9859 - val_loss: 5.9858
# Epoch 62/1000
# - 0s - loss: 2.9463 - val_loss: 0.9567
# Epoch 63/1000
# - 0s - loss: 1.5587 - val_loss: 0.1932
# Epoch 64/1000
# - 0s - loss: 1.1365 - val_loss: 2.4021
# Epoch 00064: early stopping
pred = model.predict(X_test)  # per-class probabilities for each test row
print("Shape: {}".format(pred.shape))
print(pred)
# Shape: (591, 2)
# [[1.1338343e-02 9.8866165e-01]
# [0.0000000e+00 1.0000000e+00]
# [1.4060745e-05 9.9998593e-01]
# ...
# [7.8182465e-01 2.1817538e-01]
# [0.0000000e+00 1.0000000e+00]
# [3.2087931e-04 9.9967909e-01]]
predict_classes = np.argmax(pred, axis=1)
y_test_arg = np.argmax(y_test, axis=1)
print("Predictions: {}".format(predict_classes))
print("Expected: {}".format(y_test_arg))
# Predictions: [1 1 1 1 0 1 0 1 0 0 0 0 1 0 1 1 1 1 0 1 0 0 0 1 1 0 1 0 0 1 0 0 0 1 0 1 1
# 1 0 1 0 1 1 0 0 0 1 0 0 1 0 1 0 0 1 1 0 0 0 1 0 0 1 1 1 0 0 1 1 1 1 0 1 0
# 0 0 1 1 1 0 1 0 1 1 0 0 0 0 1 0 1 0 1 0 1 1 1 1 0 1 1 0 1 1 0 0 1 1 1 1 1
# 1 1 1 1 0 0 1 1 1 0 1 1 1 0 1 0 0 0 0 1 1 0 1 1 1 1 1 0 1 1 0 1 0 0 0 0 1
# 1 1 1 1 0 1 1 1 0 0 0 1 0 1 0 1 0 1 1 1 1 1 1 1 1 0 0 1 0 1 0 0 1 0 0 0 1
# 1 1 0 1 1 1 1 0 0 0 1 1 0 1 1 1 0 0 1 1 0 1 1 1 0 1 1 1 1 1 0 1 0 1 1 0 0
# 0 0 1 1 1 0 1 1 0 1 1 1 1 1 1 0 0 1 1 1 1 0 1 1 1 0 1 1 1 0 1 1 0 1 1 1 0
# 0 1 1 0 1 0 1 0 0 0 0 1 0 1 0 1 1 1 1 0 1 0 1 0 1 1 0 1 1 1 1 1 0 1 0 1 0
# 1 1 1 1 0 1 0 1 1 1 1 1 0 1 1 0 1 0 0 1 1 1 1 1 0 0 0 1 1 0 1 0 1 1 1 1 1
# 1 1 1 0 1 0 1 0 1 1 1 1 0 1 0 0 0 1 1 1 1 0 0 1 1 1 0 1 0 0 1 1 1 0 1 0 0
# 1 1 1 1 0 0 1 1 1 1 1 1 0 1 1 1 1 0 0 0 1 0 1 0 0 1 0 1 1 0 1 1 1 1 0 0 1
# 1 0 1 1 1 1 1 0 0 1 1 0 1 1 0 1 0 0 1 1 0 1 1 1 1 1 0 1 0 1 1 1 1 1 1 0 0
# 0 1 0 1 0 1 0 0 1 1 0 1 0 1 1 0 1 1 1 1 1 1 0 0 0 0 1 1 1 1 1 0 1 1 1 1 1
# 0 1 1 0 1 0 0 1 1 1 1 0 0 0 0 1 1 0 1 0 0 0 0 0 0 1 1 1 1 0 1 1 1 1 0 1 1
# 1 1 1 0 1 1 0 1 0 1 0 1 1 0 1 0 1 1 1 0 0 1 0 0 1 1 1 1 1 1 1 1 1 1 1 0 0
# 0 0 0 1 0 1 0 0 1 1 0 0 1 0 0 0 1 1 1 0 1 0 1 0 0 1 1 1 0 1 0 1 1 0 1 1]
# Expected: [1 1 1 1 0 1 0 1 0 0 0 0 1 0 1 1 1 1 0 1 0 0 0 1 1 0 1 0 0 1 0 0 0 1 0 1 1
# 1 0 1 0 1 1 0 0 0 1 0 0 1 0 1 0 0 1 1 0 0 0 1 0 0 1 1 1 0 0 1 1 1 1 0 1 0
# 0 0 1 1 1 0 1 0 1 1 0 0 0 0 1 0 1 0 1 0 1 1 1 1 0 1 1 0 0 1 0 0 1 1 1 0 1
# 1 0 1 0 0 0 1 1 1 0 1 1 1 0 1 0 0 0 0 1 1 0 1 1 0 1 1 0 1 1 0 1 0 0 0 0 1
# 1 1 1 1 0 1 1 1 0 0 0 1 0 1 0 0 0 1 1 1 1 1 1 1 1 0 0 1 0 0 0 0 1 0 0 0 1
# 1 1 0 1 1 1 1 0 0 0 1 1 0 1 1 1 0 0 1 1 0 1 1 1 0 1 1 1 1 1 0 1 0 1 1 0 0
# 0 0 1 1 1 0 1 1 0 1 1 1 1 1 1 0 0 1 1 1 1 0 1 1 1 0 1 1 1 0 1 1 0 1 1 1 0
# 0 1 1 0 1 0 1 0 0 0 0 1 0 1 0 1 1 1 1 0 1 0 1 0 1 1 0 1 1 1 1 1 0 1 0 1 0
# 1 1 1 1 0 1 0 1 1 1 1 1 0 1 0 0 1 0 0 1 1 1 1 1 0 0 0 1 1 0 1 0 1 1 1 1 1
# 0 1 1 0 1 0 1 0 1 1 0 1 0 1 0 0 0 1 1 1 1 0 0 1 1 1 0 1 0 0 1 1 0 0 1 0 0
# 1 1 1 1 0 0 1 1 1 1 1 1 0 1 1 1 1 0 0 0 1 0 1 0 0 1 0 1 1 1 1 1 1 1 0 0 1
# 1 0 1 1 1 1 1 0 0 1 1 0 1 1 0 1 0 0 1 1 0 1 1 1 1 1 0 1 0 1 1 1 1 1 1 0 0
# 0 1 0 1 0 1 0 0 1 1 0 1 0 1 1 0 1 1 1 1 1 1 0 0 0 0 1 1 1 1 1 0 1 0 0 1 1
# 0 1 1 0 1 0 0 1 1 1 1 0 0 0 0 1 1 0 1 0 0 0 0 0 0 1 1 1 1 0 1 1 0 1 0 1 1
# 1 1 1 0 1 1 0 1 0 1 0 1 1 0 1 0 1 1 1 0 0 1 0 0 1 1 1 1 1 1 1 1 1 1 1 0 0
# 0 0 0 0 0 1 0 0 1 1 0 0 1 0 0 0 1 1 1 0 1 0 1 0 0 1 1 1 0 1 0 1 0 0 1 1]
accuracy = accuracy_score(y_test_arg, predict_classes)
print("Accuracy: {}".format(accuracy))
# Accuracy: 0.9712351945854484
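# Beyond a single accuracy number, a per-class error breakdown can be had
# from sklearn's confusion matrix; a minimal sketch, not part of the
# original gist:
from sklearn.metrics import confusion_matrix
print(confusion_matrix(y_test_arg, predict_classes))  # rows = true, cols = predicted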