{ | |
"cells": [ | |
{ | |
"cell_type": "code", | |
"execution_count": 1, | |
"metadata": { | |
"collapsed": false | |
}, | |
"outputs": [], | |
"source": [ | |
"%matplotlib inline\n", | |
"import numpy as np\n", | |
"import pandas as pd\n", | |
"import matplotlib.pyplot as plt\n", | |
"plt.style.use('ggplot')\n", | |
"\n", | |
"seed = 123456\n", | |
"np.random.seed(seed)" | |
] | |
}, | |
{ | |
"cell_type": "code", | |
"execution_count": 2, | |
"metadata": { | |
"collapsed": false | |
}, | |
"outputs": [], | |
"source": [ | |
"target_variable = 'species'\n", | |
"df = (\n", | |
" pd.read_csv('https://gist.githubusercontent.com/curran/a08a1080b88344b0c8a7/raw/d546eaee765268bf2f487608c537c05e22e4b221/iris.csv')\n", | |
"\n", | |
" # Rename columns to lowercase and underscores\n", | |
" .pipe(lambda d: d.rename(columns={\n", | |
" k: v for k, v in zip(\n", | |
" d.columns, \n", | |
" [c.lower().replace(' ', '_') for c in d.columns]\n", | |
" )\n", | |
" }))\n", | |
" # Switch categorical classes to integers\n", | |
" .assign(**{target_variable: lambda r: r[target_variable].astype('category').cat.codes})\n", | |
")" | |
] | |
}, | |
{ | |
"cell_type": "code", | |
"execution_count": 3, | |
"metadata": { | |
"collapsed": false | |
}, | |
"outputs": [ | |
{ | |
"data": { | |
"text/plain": [ | |
"<matplotlib.axes._subplots.AxesSubplot at 0x1076f0710>" | |
] | |
}, | |
"execution_count": 3, | |
"metadata": {}, | |
"output_type": "execute_result" | |
}, | |
{ | |
"data": { | |
"image/png": "iVBORw0KGgoAAAANSUhEUgAAAXQAAAD4CAYAAAD8Zh1EAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAADcdJREFUeJzt3F1oHPW/x/HPJBGiTbPZNLGaaJU2lWJZCZjgA8oaOiKU\nXuy5KVgiFL0RhGp8wFAxEauwto3bBmq9EQUvhN6cATn1Zk1dQS9craFSsRhQitW2u8k4mtJSkt3/\nhecshsbuZrMPZ7++X1fu7KzzZX7wZpjOxMnn83kBABpeU70HAABUBkEHACMIOgAYQdABwAiCDgBG\nEHQAMIKgA4ARBB0AjCDoAGAEQQcAI1pqfcBff/211oesma6uLmWz2XqPgTKwdo3N+vr19PSUtB9X\n6ABgBEEHACMIOgAYQdABwAiCDgBGlPSUyzPPPKPW1lY1NTWpublZ8Xhc8/PzSiQSymQy6u7u1sjI\niNra2qo9LwDgH5T82OL4+Lja29sLnz3PUyQSUSwWk+d58jxPw8PDVRkSAFBc2bdc0um0otGoJCka\njSqdTldsKADAypV8hb5v3z41NTXp0Ucfleu6CoJA4XBYktTR0aEgCJb9XTKZVDKZlCTF43F1dXVV\nYOzSXPivB2t2LEm6UNOjSev/+8saH7F2WLvGxvrVR0lB37dvnzo7OxUEgd54441r3lpyHEeO4yz7\nW9d15bpu4bPlt7lqjXPZuFi7xlbr9avom6KdnZ2SpFAopMHBQc3MzCgUCsn3fUmS7/tL7q8DAGqv\naNCvXLmiy5cvF/771KlT2rBhgwYGBpRKpSRJqVRKg4OD1Z0UAHBdRW+5BEGggwcPSpIWFxf10EMP\nqb+/X5s2bVIikdDU1FThsUUAQP0UDfr69et14MCBa7avXbtWY2NjVRkKALByvCkKAEYQdAAwgqAD\ngBEEHQCMIOgAYARBBwAjCDoAGEHQAcAIgg4ARhB0ADCCoAOAEQQdAIwg6ABgBEEHACMIOgAYQdAB\nwAiCDgBGEHQAMIKgA4ARBB0AjCDoAGAEQQcAIwg6ABhB0AHACIIOAEYQdAAwgqADgBEEHQCMIOgA\nYARBBwAjCDoAGEHQAcCIllJ3zOVyGh0dVWdnp0ZHRzU/P69EIqFMJqPu7m6NjIyora2tmrMCAK6j\n5Cv048ePq7e3t/DZ8zxFIhFNTk4qEonI87yqDAgAKE1JQZ+dndXJkye1bdu2wrZ0Oq1oNCpJikaj\nSqfT1ZkQAFCSkoL+wQcfaHh4WI7jFLYFQaBwOCxJ6ujoUBAE1ZkQAFCSovfQv/nmG4VCIW3cuFGn\nT59edh/HcZbE/u+SyaSSyaQkKR6Pq6uraxXjrsyFmh2pPmp5LmuNtWtsrF99FA36mTNn9PXXX+vb\nb7/V1atXdfnyZU1OTioUCsn3fYXDYfm+r/b29mV/77quXNctfM5ms5Wb/l+Oc9m4WLvGVuv16+np\nKWm/okHftWuXdu3aJUk6ffq0Pv74Y+3Zs0cffvihUqmUYrGYUqmUBgcHVzcxAGBVyn4OPRaL6dSp\nU9qzZ4++++47xWKxSs4FAFihkp9Dl6StW7dq69atkqS1a9dqbGysKkMBAFaON0UBwAiCDgBGEHQA\nMIKgA4ARBB0AjCDoAGAEQQcAIwg6ABhB0AHACIIOAEYQdAAwgqADgBEEHQCMIOgAYARBBwAjCDoA\nGEHQAcAIgg4ARhB0ADCCoAOAEQQdAIwg6ABgBEEHACMIOgAYQdABwAiCDgBGEHQAMIKgA4ARBB0A\njCDoAGAEQQcAIwg6ABhB0AHAiJZiO1y9elXj4+NaWFjQ4uKi7r//fu3cuVPz8/NKJBLKZDLq7u7W\nyMiI2traajEzAGAZRYN+ww03aHx8XK2trVpYWNDY2Jj6+/v11VdfKRKJKBaLyfM8eZ6n4eHhWswM\nAFhG0VsujuOotbVVkrS4uKjFxUU5jqN0Oq1oNCpJikajSqfT1Z0UAHBdRa/QJSmXy+nll1/W+fPn\n9dhjj2nz5s0KgkDhcFiS1NHRoSAIlv1tMplUMpmUJMXjcXV1dVVo9OIu1OxI9VHLc1lrrF1jY/3q\no6SgNzU16cCBA7p06ZIOHjyos2fPLvnecRw5jrPsb13Xleu6hc/ZbHYV4+LvOJeNi7VrbLVev56e\nnpL2W9FTLmvWrNHWrVs1PT2tUCgk3/clSb7vq729feVTAgAqpmjQ//jjD126dEnSX0+8nDp1Sr29\nvRoYGFAqlZIkpVIpDQ4OVndSAMB1Fb3l4vu+jhw5olwup3w+rwceeED33nuv7rrrLiUSCU1NTRUe\nWwQA1E/RoN9xxx3av3//NdvXrl2rsbGxqgwFAFg53hQFACMIOgAYQdABwAiCDgBGEHQAMIKgA4AR\nBB0AjCDoAGAEQQcAIwg6ABhB0AHACIIOAEYQdAAwgqADgBEEHQCMIOgAYARBBwAjCDoAGEHQAcAI\ngg4ARhB0ADCCoAOAEQQdAIwg6ABgBEEHACMIOgAYQdABwAiCDgBGEHQAMIKgA4ARBB0AjCDoAGBE\nS7Edstmsjhw5ot9//12O48h1XW3fvl3z8/NKJBLKZDLq7u7WyMiI2traajEzAGAZRYPe3NysJ554\nQhs3btTly5c1Ojqqe+65R5999pkikYhisZg8z5PneRoeHq7FzACAZRS95RIOh7Vx40ZJ0o033qje\n3l7Nzc0pnU4rGo1KkqLRqNLpdHUnBQBc14ruoV+8eFE//fST+vr6FASBwuGwJKmjo0NBEFRlQABA\naYrecvk/V65c0cTEhHbv3q2bbrppyXeO48hxnGV/l0wmlUwmJUnxeFxdXV2rGHdlLtTsSPVRy3NZ\na6xdY2P96qOkoC8sLGhiYkIPP/yw7rvvPklSKBSS7/sKh8PyfV/t7e3L/tZ1XbmuW/iczWYrMDYk\nzmUjY+0aW63Xr6enp6T9it5yyefzevfdd9Xb26sdO3YUtg8MDCiVSkmSUqmUBgcHyxwVAFAJRa/Q\nz5w5o88//1wbNmzQSy+9JEl6/PHHFYvFlEgkNDU1VXhsEQBQP0WDvmXLFh07dmzZ78bGxio+EACg\nPLwpCgBGEHQAMIKgA4ARBB0AjCDoAGAEQQcAIwg6ABhB0AHACIIOAEYQdAAwgqADgBEEHQCMIOgA\nYARBBwAjCDoAGEHQAcAIgg4ARhB0ADCCoAOAEQQdAIwg6ABgBEEHACMIOgAYQdABwAiCDgBGEHQA\nMIKgA4ARBB0AjCDoAGAEQQcAIwg6ABhB0AHACIIOAEa0FNvhnXfe0cmTJxUKhTQxMSFJmp+fVyKR\nUCaTUXd3t0ZGRtTW1lb1YQEA/6zoFfojjzyivXv3LtnmeZ4ikYgmJycViUTkeV7VBgQAlKZo0O++\n++5rrr7T6bSi0agkKRqNKp1OV2c6AEDJit5yWU4QBAqHw5Kkjo4OBUHwj/smk0klk0lJUjweV1dX\nVzmHLMuFmh2pPmp5LmuNtWtsrF99lBX
0v3McR47j/OP3ruvKdd3C52w2u9pD4n9xLhsXa9fYar1+\nPT09Je1X1lMuoVBIvu9LknzfV3t7ezn/GwBABZUV9IGBAaVSKUlSKpXS4OBgRYcCAKxc0Vsuhw4d\n0vfff68///xTTz/9tHbu3KlYLKZEIqGpqanCY4sAgPoqGvTnnntu2e1jY2MVHwYAUD7eFAUAIwg6\nABhB0AHACIIOAEYQdAAwgqADgBEEHQCMIOgAYARBBwAjCDoAGEHQAcAIgg4ARhB0ADCCoAOAEQQd\nAIwg6ABgBEEHACMIOgAYQdABwAiCDgBGEHQAMIKgA4ARBB0AjCDoAGAEQQcAIwg6ABhB0AHACIIO\nAEYQdAAwgqADgBEEHQCMIOgAYETLan48PT2t999/X7lcTtu2bVMsFqvUXACAFSr7Cj2Xy+m9997T\n3r17lUgk9MUXX+iXX36p5GwAgBUoO+gzMzO65ZZbtH79erW0tOjBBx9UOp2u5GwAgBUo+5bL3Nyc\n1q1bV/i8bt06/fjjj9fsl0wmlUwmJUnxeFw9PT3lHnLl/ufr2h0LlcXaNTbWry6q/o+irusqHo8r\nHo9X+1B1Nzo6Wu8RUCbWrrGxfn8pO+idnZ2anZ0tfJ6dnVVnZ2dFhgIArFzZQd+0aZN+++03Xbx4\nUQsLC/ryyy81MDBQydkAACtQ9j305uZmPfnkk3rzzTeVy+U0NDSk22+/vZKzNRzXdes9AsrE2jU2\n1u8vTj6fz9d7CADA6vGmKAAYQdABwAiCDgBGEHQAMGJVf5zr3+zcuXNKp9Oam5uT9Ndz+QMDA7rt\nttvqPBlg37lz5zQ3N6fNmzertbW1sH16elr9/f11nKy+uEIvg+d5OnTokCSpr69PfX19kqTDhw/L\n87x6joZVOnHiRL1HQBHHjx/X/v379cknn+iFF15Y8jekPvroozpOVn9coZfhxIkTmpiYUEvL0tO3\nY8cOPf/88/wZ4QZ27NgxDQ0N1XsMXMenn36qt956S62trbp48aLefvttZTIZbd++Xf/2p7AJehkc\nx5Hv++ru7l6y3fd9OY5Tp6lQqhdffHHZ7fl8XkEQ1HgarFQ+ny/cZrn55pv12muvaWJiQplMhqDX\ne4BGtHv3br3++uu69dZbC39xMpvN6vz583rqqafqPB2KCYJAr7zyitasWbNkez6f16uvvlqnqVCq\nUCikn3/+WXfeeackqbW1VaOjozp69KjOnj1b3+HqjDdFy5TL5TQzM7PkH0X7+vrU1MQ/S/x/d/To\nUQ0NDWnLli3XfHf48GE9++yzdZgKpZqdnVVzc7M6Ojqu+e6HH35Ydl3/LQg6ABjB5SQAGEHQAcAI\ngg4ARhB0ADDiP4QEUCZFUbxgAAAAAElFTkSuQmCC\n", | |
"text/plain": [ | |
"<matplotlib.figure.Figure at 0x10b282950>" | |
] | |
}, | |
"metadata": {}, | |
"output_type": "display_data" | |
} | |
], | |
"source": [ | |
"df[target_variable].value_counts().sort_index().plot.bar()" | |
] | |
}, | |
{ | |
"cell_type": "code", | |
"execution_count": 4, | |
"metadata": { | |
"collapsed": false | |
}, | |
"outputs": [], | |
"source": [ | |
"y = df[target_variable].values\n", | |
"X = (\n", | |
" # Drop target variable\n", | |
" df.drop(target_variable, axis=1)\n", | |
" # Min-max-scaling (only needed for the DL model)\n", | |
" .pipe(lambda d: (d-d.min())/d.max()).fillna(0)\n", | |
" .as_matrix()\n", | |
")" | |
] | |
}, | |
{ | |
"cell_type": "code", | |
"execution_count": 5, | |
"metadata": { | |
"collapsed": false | |
}, | |
"outputs": [], | |
"source": [ | |
"from sklearn.metrics import accuracy_score\n", | |
"from sklearn.model_selection import train_test_split, cross_val_score\n", | |
"\n", | |
"X_train, X_test, y_train, y_test = train_test_split(\n", | |
" X, y, test_size=0.33, random_state=seed\n", | |
")" | |
] | |
}, | |
{ | |
"cell_type": "code", | |
"execution_count": 6, | |
"metadata": { | |
"collapsed": false | |
}, | |
"outputs": [ | |
{ | |
"name": "stderr", | |
"output_type": "stream", | |
"text": [ | |
"Using Theano backend.\n" | |
] | |
} | |
], | |
"source": [ | |
"from keras.models import Sequential\n", | |
"from keras.callbacks import EarlyStopping, ModelCheckpoint\n", | |
"from keras.layers import Dense, Activation, Dropout\n", | |
"from keras import optimizers" | |
] | |
}, | |
{ | |
"cell_type": "code", | |
"execution_count": 7, | |
"metadata": { | |
"collapsed": true | |
}, | |
"outputs": [], | |
"source": [ | |
"m = Sequential()\n", | |
"m.add(Dense(128, activation='relu', input_shape=(X.shape[1],)))\n", | |
"m.add(Dropout(0.5))\n", | |
"m.add(Dense(128, activation='relu'))\n", | |
"m.add(Dropout(0.5))\n", | |
"m.add(Dense(128, activation='relu'))\n", | |
"m.add(Dropout(0.5))\n", | |
"m.add(Dense(len(np.unique(y)), activation='softmax'))\n", | |
" \n", | |
"m.compile(\n", | |
" optimizer=optimizers.Adam(lr=0.001),\n", | |
" loss='categorical_crossentropy',\n", | |
" metrics=['accuracy']\n", | |
")" | |
] | |
}, | |
{ | |
"cell_type": "code", | |
"execution_count": 8, | |
"metadata": { | |
"collapsed": false, | |
"scrolled": true | |
}, | |
"outputs": [ | |
{ | |
"name": "stdout", | |
"output_type": "stream", | |
"text": [ | |
"Train on 90 samples, validate on 10 samples\n", | |
"Epoch 1/200\n", | |
"Epoch 00000: val_loss improved from inf to 1.08902, saving model to best.model\n", | |
"0s - loss: 1.0810 - acc: 0.3111 - val_loss: 1.0890 - val_acc: 0.4000\n", | |
"Epoch 2/200\n", | |
"Epoch 00001: val_loss improved from 1.08902 to 1.08431, saving model to best.model\n", | |
"0s - loss: 1.0742 - acc: 0.3444 - val_loss: 1.0843 - val_acc: 0.4000\n", | |
"Epoch 3/200\n", | |
"Epoch 00002: val_loss improved from 1.08431 to 1.07970, saving model to best.model\n", | |
"0s - loss: 1.0753 - acc: 0.3222 - val_loss: 1.0797 - val_acc: 0.1000\n", | |
"Epoch 4/200\n", | |
"Epoch 00003: val_loss improved from 1.07970 to 1.07548, saving model to best.model\n", | |
"0s - loss: 1.0457 - acc: 0.4667 - val_loss: 1.0755 - val_acc: 0.1000\n", | |
"Epoch 5/200\n", | |
"Epoch 00004: val_loss improved from 1.07548 to 1.07193, saving model to best.model\n", | |
"0s - loss: 1.0484 - acc: 0.3444 - val_loss: 1.0719 - val_acc: 0.1000\n", | |
"Epoch 6/200\n", | |
"Epoch 00005: val_loss improved from 1.07193 to 1.06892, saving model to best.model\n", | |
"0s - loss: 1.0482 - acc: 0.3444 - val_loss: 1.0689 - val_acc: 0.1000\n", | |
"Epoch 7/200\n", | |
"Epoch 00006: val_loss improved from 1.06892 to 1.06596, saving model to best.model\n", | |
"0s - loss: 1.0244 - acc: 0.3556 - val_loss: 1.0660 - val_acc: 0.1000\n", | |
"Epoch 8/200\n", | |
"Epoch 00007: val_loss improved from 1.06596 to 1.06263, saving model to best.model\n", | |
"0s - loss: 1.0128 - acc: 0.3667 - val_loss: 1.0626 - val_acc: 0.1000\n", | |
"Epoch 9/200\n", | |
"Epoch 00008: val_loss improved from 1.06263 to 1.05920, saving model to best.model\n", | |
"0s - loss: 1.0057 - acc: 0.3778 - val_loss: 1.0592 - val_acc: 0.1000\n", | |
"Epoch 10/200\n", | |
"Epoch 00009: val_loss improved from 1.05920 to 1.05557, saving model to best.model\n", | |
"0s - loss: 1.0132 - acc: 0.3444 - val_loss: 1.0556 - val_acc: 0.1000\n", | |
"Epoch 11/200\n", | |
"Epoch 00010: val_loss improved from 1.05557 to 1.05126, saving model to best.model\n", | |
"0s - loss: 0.9930 - acc: 0.3889 - val_loss: 1.0513 - val_acc: 0.1000\n", | |
"Epoch 12/200\n", | |
"Epoch 00011: val_loss improved from 1.05126 to 1.04704, saving model to best.model\n", | |
"0s - loss: 0.9669 - acc: 0.4000 - val_loss: 1.0470 - val_acc: 0.2000\n", | |
"Epoch 13/200\n", | |
"Epoch 00012: val_loss improved from 1.04704 to 1.04199, saving model to best.model\n", | |
"0s - loss: 0.9622 - acc: 0.4000 - val_loss: 1.0420 - val_acc: 0.3000\n", | |
"Epoch 14/200\n", | |
"Epoch 00013: val_loss improved from 1.04199 to 1.03631, saving model to best.model\n", | |
"0s - loss: 0.9734 - acc: 0.3667 - val_loss: 1.0363 - val_acc: 0.4000\n", | |
"Epoch 15/200\n", | |
"Epoch 00014: val_loss improved from 1.03631 to 1.02875, saving model to best.model\n", | |
"0s - loss: 0.9528 - acc: 0.4222 - val_loss: 1.0288 - val_acc: 0.5000\n", | |
"Epoch 16/200\n", | |
"Epoch 00015: val_loss improved from 1.02875 to 1.01995, saving model to best.model\n", | |
"0s - loss: 0.9387 - acc: 0.3667 - val_loss: 1.0200 - val_acc: 0.5000\n", | |
"Epoch 17/200\n", | |
"Epoch 00016: val_loss improved from 1.01995 to 1.01071, saving model to best.model\n", | |
"0s - loss: 0.9008 - acc: 0.3889 - val_loss: 1.0107 - val_acc: 0.5000\n", | |
"Epoch 18/200\n", | |
"Epoch 00017: val_loss improved from 1.01071 to 1.00103, saving model to best.model\n", | |
"0s - loss: 0.9138 - acc: 0.4000 - val_loss: 1.0010 - val_acc: 0.5000\n", | |
"Epoch 19/200\n", | |
"Epoch 00018: val_loss improved from 1.00103 to 0.99100, saving model to best.model\n", | |
"0s - loss: 0.8879 - acc: 0.4889 - val_loss: 0.9910 - val_acc: 0.5000\n", | |
"Epoch 20/200\n", | |
"Epoch 00019: val_loss improved from 0.99100 to 0.98054, saving model to best.model\n", | |
"0s - loss: 0.9013 - acc: 0.4778 - val_loss: 0.9805 - val_acc: 0.5000\n", | |
"Epoch 21/200\n", | |
"Epoch 00020: val_loss improved from 0.98054 to 0.96944, saving model to best.model\n", | |
"0s - loss: 0.8908 - acc: 0.4556 - val_loss: 0.9694 - val_acc: 0.5000\n", | |
"Epoch 22/200\n", | |
"Epoch 00021: val_loss improved from 0.96944 to 0.95720, saving model to best.model\n", | |
"0s - loss: 0.8886 - acc: 0.5778 - val_loss: 0.9572 - val_acc: 0.5000\n", | |
"Epoch 23/200\n", | |
"Epoch 00022: val_loss improved from 0.95720 to 0.94371, saving model to best.model\n", | |
"0s - loss: 0.8760 - acc: 0.5667 - val_loss: 0.9437 - val_acc: 0.6000\n", | |
"Epoch 24/200\n", | |
"Epoch 00023: val_loss improved from 0.94371 to 0.92952, saving model to best.model\n", | |
"0s - loss: 0.8668 - acc: 0.5556 - val_loss: 0.9295 - val_acc: 0.6000\n", | |
"Epoch 25/200\n", | |
"Epoch 00024: val_loss improved from 0.92952 to 0.91461, saving model to best.model\n", | |
"0s - loss: 0.8260 - acc: 0.5667 - val_loss: 0.9146 - val_acc: 0.6000\n", | |
"Epoch 26/200\n", | |
"Epoch 00025: val_loss improved from 0.91461 to 0.89976, saving model to best.model\n", | |
"0s - loss: 0.8194 - acc: 0.6556 - val_loss: 0.8998 - val_acc: 0.6000\n", | |
"Epoch 27/200\n", | |
"Epoch 00026: val_loss improved from 0.89976 to 0.88462, saving model to best.model\n", | |
"0s - loss: 0.8235 - acc: 0.6556 - val_loss: 0.8846 - val_acc: 0.6000\n", | |
"Epoch 28/200\n", | |
"Epoch 00027: val_loss improved from 0.88462 to 0.86875, saving model to best.model\n", | |
"0s - loss: 0.8225 - acc: 0.6556 - val_loss: 0.8688 - val_acc: 0.6000\n", | |
"Epoch 29/200\n", | |
"Epoch 00028: val_loss improved from 0.86875 to 0.85228, saving model to best.model\n", | |
"0s - loss: 0.8061 - acc: 0.6778 - val_loss: 0.8523 - val_acc: 0.6000\n", | |
"Epoch 30/200\n", | |
"Epoch 00029: val_loss improved from 0.85228 to 0.83461, saving model to best.model\n", | |
"0s - loss: 0.8105 - acc: 0.6444 - val_loss: 0.8346 - val_acc: 0.6000\n", | |
"Epoch 31/200\n", | |
"Epoch 00030: val_loss improved from 0.83461 to 0.81738, saving model to best.model\n", | |
"0s - loss: 0.7878 - acc: 0.6667 - val_loss: 0.8174 - val_acc: 0.6000\n", | |
"Epoch 32/200\n", | |
"Epoch 00031: val_loss improved from 0.81738 to 0.79992, saving model to best.model\n", | |
"0s - loss: 0.7598 - acc: 0.6889 - val_loss: 0.7999 - val_acc: 0.6000\n", | |
"Epoch 33/200\n", | |
"Epoch 00032: val_loss improved from 0.79992 to 0.78234, saving model to best.model\n", | |
"0s - loss: 0.7494 - acc: 0.6889 - val_loss: 0.7823 - val_acc: 0.6000\n", | |
"Epoch 34/200\n", | |
"Epoch 00033: val_loss improved from 0.78234 to 0.76362, saving model to best.model\n", | |
"0s - loss: 0.7252 - acc: 0.7333 - val_loss: 0.7636 - val_acc: 0.6000\n", | |
"Epoch 35/200\n", | |
"Epoch 00034: val_loss improved from 0.76362 to 0.74384, saving model to best.model\n", | |
"0s - loss: 0.7446 - acc: 0.6667 - val_loss: 0.7438 - val_acc: 0.6000\n", | |
"Epoch 36/200\n", | |
"Epoch 00035: val_loss improved from 0.74384 to 0.72375, saving model to best.model\n", | |
"0s - loss: 0.7152 - acc: 0.6889 - val_loss: 0.7237 - val_acc: 0.6000\n", | |
"Epoch 37/200\n", | |
"Epoch 00036: val_loss improved from 0.72375 to 0.70302, saving model to best.model\n", | |
"0s - loss: 0.7055 - acc: 0.6667 - val_loss: 0.7030 - val_acc: 0.6000\n", | |
"Epoch 38/200\n", | |
"Epoch 00037: val_loss improved from 0.70302 to 0.68160, saving model to best.model\n", | |
"0s - loss: 0.6781 - acc: 0.7000 - val_loss: 0.6816 - val_acc: 0.6000\n", | |
"Epoch 39/200\n", | |
"Epoch 00038: val_loss improved from 0.68160 to 0.65875, saving model to best.model\n", | |
"0s - loss: 0.6922 - acc: 0.6667 - val_loss: 0.6587 - val_acc: 0.6000\n", | |
"Epoch 40/200\n", | |
"Epoch 00039: val_loss improved from 0.65875 to 0.63539, saving model to best.model\n", | |
"0s - loss: 0.6607 - acc: 0.6889 - val_loss: 0.6354 - val_acc: 0.6000\n", | |
"Epoch 41/200\n", | |
"Epoch 00040: val_loss improved from 0.63539 to 0.61127, saving model to best.model\n", | |
"0s - loss: 0.6444 - acc: 0.6556 - val_loss: 0.6113 - val_acc: 0.6000\n", | |
"Epoch 42/200\n", | |
"Epoch 00041: val_loss improved from 0.61127 to 0.58632, saving model to best.model\n", | |
"0s - loss: 0.6329 - acc: 0.6778 - val_loss: 0.5863 - val_acc: 0.6000\n", | |
"Epoch 43/200\n", | |
"Epoch 00042: val_loss improved from 0.58632 to 0.56098, saving model to best.model\n", | |
"0s - loss: 0.5976 - acc: 0.7222 - val_loss: 0.5610 - val_acc: 0.6000\n", | |
"Epoch 44/200\n", | |
"Epoch 00043: val_loss improved from 0.56098 to 0.53696, saving model to best.model\n", | |
"0s - loss: 0.5992 - acc: 0.7000 - val_loss: 0.5370 - val_acc: 0.6000\n", | |
"Epoch 45/200\n", | |
"Epoch 00044: val_loss improved from 0.53696 to 0.51484, saving model to best.model\n", | |
"0s - loss: 0.5760 - acc: 0.7222 - val_loss: 0.5148 - val_acc: 0.6000\n", | |
"Epoch 46/200\n", | |
"Epoch 00045: val_loss improved from 0.51484 to 0.49314, saving model to best.model\n", | |
"0s - loss: 0.5803 - acc: 0.7444 - val_loss: 0.4931 - val_acc: 0.6000\n", | |
"Epoch 47/200\n", | |
"Epoch 00046: val_loss improved from 0.49314 to 0.47289, saving model to best.model\n", | |
"0s - loss: 0.5596 - acc: 0.7000 - val_loss: 0.4729 - val_acc: 0.6000\n", | |
"Epoch 48/200\n", | |
"Epoch 00047: val_loss improved from 0.47289 to 0.45498, saving model to best.model\n", | |
"0s - loss: 0.5621 - acc: 0.7333 - val_loss: 0.4550 - val_acc: 0.6000\n", | |
"Epoch 49/200\n", | |
"Epoch 00048: val_loss improved from 0.45498 to 0.43881, saving model to best.model\n", | |
"0s - loss: 0.5483 - acc: 0.6889 - val_loss: 0.4388 - val_acc: 0.6000\n", | |
"Epoch 50/200\n", | |
"Epoch 00049: val_loss improved from 0.43881 to 0.42522, saving model to best.model\n", | |
"0s - loss: 0.5499 - acc: 0.7444 - val_loss: 0.4252 - val_acc: 0.6000\n", | |
"Epoch 51/200\n", | |
"Epoch 00050: val_loss improved from 0.42522 to 0.41288, saving model to best.model\n", | |
"0s - loss: 0.5131 - acc: 0.7444 - val_loss: 0.4129 - val_acc: 0.6000\n", | |
"Epoch 52/200\n", | |
"Epoch 00051: val_loss improved from 0.41288 to 0.40073, saving model to best.model\n", | |
"0s - loss: 0.4989 - acc: 0.7444 - val_loss: 0.4007 - val_acc: 0.6000\n", | |
"Epoch 53/200\n", | |
"Epoch 00052: val_loss improved from 0.40073 to 0.38843, saving model to best.model\n", | |
"0s - loss: 0.5099 - acc: 0.7444 - val_loss: 0.3884 - val_acc: 0.6000\n", | |
"Epoch 54/200\n", | |
"Epoch 00053: val_loss improved from 0.38843 to 0.37680, saving model to best.model\n", | |
"0s - loss: 0.4632 - acc: 0.7667 - val_loss: 0.3768 - val_acc: 0.6000\n", | |
"Epoch 55/200\n", | |
"Epoch 00054: val_loss improved from 0.37680 to 0.36614, saving model to best.model\n", | |
"0s - loss: 0.4729 - acc: 0.7111 - val_loss: 0.3661 - val_acc: 0.6000\n", | |
"Epoch 56/200\n", | |
"Epoch 00055: val_loss improved from 0.36614 to 0.35634, saving model to best.model\n", | |
"0s - loss: 0.4506 - acc: 0.7556 - val_loss: 0.3563 - val_acc: 0.6000\n", | |
"Epoch 57/200\n", | |
"Epoch 00056: val_loss improved from 0.35634 to 0.34569, saving model to best.model\n", | |
"0s - loss: 0.4805 - acc: 0.7333 - val_loss: 0.3457 - val_acc: 0.6000\n", | |
"Epoch 58/200\n", | |
"Epoch 00057: val_loss improved from 0.34569 to 0.33434, saving model to best.model\n", | |
"0s - loss: 0.4521 - acc: 0.7889 - val_loss: 0.3343 - val_acc: 0.7000\n", | |
"Epoch 59/200\n", | |
"Epoch 00058: val_loss improved from 0.33434 to 0.32490, saving model to best.model\n", | |
"0s - loss: 0.4473 - acc: 0.7556 - val_loss: 0.3249 - val_acc: 0.7000\n", | |
"Epoch 60/200\n", | |
"Epoch 00059: val_loss improved from 0.32490 to 0.31459, saving model to best.model\n", | |
"0s - loss: 0.4898 - acc: 0.7111 - val_loss: 0.3146 - val_acc: 0.9000\n", | |
"Epoch 61/200\n", | |
"Epoch 00060: val_loss improved from 0.31459 to 0.30396, saving model to best.model\n", | |
"0s - loss: 0.4458 - acc: 0.7778 - val_loss: 0.3040 - val_acc: 0.9000\n", | |
"Epoch 62/200\n", | |
"Epoch 00061: val_loss improved from 0.30396 to 0.29500, saving model to best.model\n", | |
"0s - loss: 0.4233 - acc: 0.7778 - val_loss: 0.2950 - val_acc: 1.0000\n", | |
"Epoch 63/200\n", | |
"Epoch 00062: val_loss improved from 0.29500 to 0.28831, saving model to best.model\n", | |
"0s - loss: 0.4448 - acc: 0.8111 - val_loss: 0.2883 - val_acc: 1.0000\n", | |
"Epoch 64/200\n", | |
"Epoch 00063: val_loss improved from 0.28831 to 0.28131, saving model to best.model\n", | |
"0s - loss: 0.4509 - acc: 0.7556 - val_loss: 0.2813 - val_acc: 1.0000\n", | |
"Epoch 65/200\n", | |
"Epoch 00064: val_loss improved from 0.28131 to 0.27650, saving model to best.model\n", | |
"0s - loss: 0.4137 - acc: 0.8111 - val_loss: 0.2765 - val_acc: 1.0000\n", | |
"Epoch 66/200\n", | |
"Epoch 00065: val_loss improved from 0.27650 to 0.26948, saving model to best.model\n", | |
"0s - loss: 0.4130 - acc: 0.7889 - val_loss: 0.2695 - val_acc: 1.0000\n", | |
"Epoch 67/200\n", | |
"Epoch 00066: val_loss improved from 0.26948 to 0.26423, saving model to best.model\n", | |
"0s - loss: 0.4165 - acc: 0.8333 - val_loss: 0.2642 - val_acc: 1.0000\n", | |
"Epoch 68/200\n", | |
"Epoch 00067: val_loss improved from 0.26423 to 0.25855, saving model to best.model\n", | |
"0s - loss: 0.4046 - acc: 0.8111 - val_loss: 0.2585 - val_acc: 1.0000\n", | |
"Epoch 69/200\n", | |
"Epoch 00068: val_loss improved from 0.25855 to 0.24801, saving model to best.model\n", | |
"0s - loss: 0.3771 - acc: 0.8333 - val_loss: 0.2480 - val_acc: 1.0000\n", | |
"Epoch 70/200\n", | |
"Epoch 00069: val_loss improved from 0.24801 to 0.24221, saving model to best.model\n", | |
"0s - loss: 0.4373 - acc: 0.7667 - val_loss: 0.2422 - val_acc: 1.0000\n", | |
"Epoch 71/200\n", | |
"Epoch 00070: val_loss improved from 0.24221 to 0.23847, saving model to best.model\n", | |
"0s - loss: 0.3871 - acc: 0.8222 - val_loss: 0.2385 - val_acc: 1.0000\n", | |
"Epoch 72/200\n", | |
"Epoch 00071: val_loss improved from 0.23847 to 0.23120, saving model to best.model\n", | |
"0s - loss: 0.3662 - acc: 0.8333 - val_loss: 0.2312 - val_acc: 1.0000\n", | |
"Epoch 73/200\n", | |
"Epoch 00072: val_loss improved from 0.23120 to 0.22281, saving model to best.model\n", | |
"0s - loss: 0.3844 - acc: 0.8222 - val_loss: 0.2228 - val_acc: 1.0000\n", | |
"Epoch 74/200\n", | |
"Epoch 00073: val_loss improved from 0.22281 to 0.21271, saving model to best.model\n", | |
"0s - loss: 0.3716 - acc: 0.8556 - val_loss: 0.2127 - val_acc: 1.0000\n", | |
"Epoch 75/200\n", | |
"Epoch 00074: val_loss improved from 0.21271 to 0.20344, saving model to best.model\n", | |
"0s - loss: 0.3900 - acc: 0.8444 - val_loss: 0.2034 - val_acc: 1.0000\n", | |
"Epoch 76/200\n", | |
"Epoch 00075: val_loss improved from 0.20344 to 0.19654, saving model to best.model\n", | |
"0s - loss: 0.3902 - acc: 0.8444 - val_loss: 0.1965 - val_acc: 1.0000\n", | |
"Epoch 77/200\n", | |
"Epoch 00076: val_loss improved from 0.19654 to 0.18931, saving model to best.model\n", | |
"0s - loss: 0.3684 - acc: 0.8556 - val_loss: 0.1893 - val_acc: 1.0000\n", | |
"Epoch 78/200\n", | |
"Epoch 00077: val_loss improved from 0.18931 to 0.18083, saving model to best.model\n", | |
"0s - loss: 0.4101 - acc: 0.8333 - val_loss: 0.1808 - val_acc: 1.0000\n", | |
"Epoch 79/200\n", | |
"Epoch 00078: val_loss improved from 0.18083 to 0.17040, saving model to best.model\n", | |
"0s - loss: 0.3166 - acc: 0.8889 - val_loss: 0.1704 - val_acc: 1.0000\n", | |
"Epoch 80/200\n", | |
"Epoch 00079: val_loss improved from 0.17040 to 0.15618, saving model to best.model\n", | |
"0s - loss: 0.3715 - acc: 0.8111 - val_loss: 0.1562 - val_acc: 1.0000\n", | |
"Epoch 81/200\n", | |
"Epoch 00080: val_loss improved from 0.15618 to 0.14397, saving model to best.model\n", | |
"0s - loss: 0.3273 - acc: 0.8889 - val_loss: 0.1440 - val_acc: 1.0000\n", | |
"Epoch 82/200\n", | |
"Epoch 00081: val_loss improved from 0.14397 to 0.13887, saving model to best.model\n", | |
"0s - loss: 0.3762 - acc: 0.8111 - val_loss: 0.1389 - val_acc: 1.0000\n", | |
"Epoch 83/200\n", | |
"Epoch 00082: val_loss improved from 0.13887 to 0.13879, saving model to best.model\n", | |
"0s - loss: 0.3019 - acc: 0.9222 - val_loss: 0.1388 - val_acc: 1.0000\n", | |
"Epoch 84/200\n", | |
"Epoch 00083: val_loss did not improve\n", | |
"0s - loss: 0.3729 - acc: 0.8333 - val_loss: 0.1434 - val_acc: 1.0000\n", | |
"Epoch 85/200\n", | |
"Epoch 00084: val_loss did not improve\n", | |
"0s - loss: 0.3696 - acc: 0.8222 - val_loss: 0.1417 - val_acc: 1.0000\n", | |
"Epoch 86/200\n", | |
"Epoch 00085: val_loss improved from 0.13879 to 0.13500, saving model to best.model\n", | |
"0s - loss: 0.2849 - acc: 0.9222 - val_loss: 0.1350 - val_acc: 1.0000\n", | |
"Epoch 87/200\n", | |
"Epoch 00086: val_loss improved from 0.13500 to 0.13028, saving model to best.model\n", | |
"0s - loss: 0.3408 - acc: 0.8111 - val_loss: 0.1303 - val_acc: 1.0000\n", | |
"Epoch 88/200\n", | |
"Epoch 00087: val_loss improved from 0.13028 to 0.12528, saving model to best.model\n", | |
"0s - loss: 0.3496 - acc: 0.8333 - val_loss: 0.1253 - val_acc: 1.0000\n", | |
"Epoch 89/200\n", | |
"Epoch 00088: val_loss improved from 0.12528 to 0.11742, saving model to best.model\n", | |
"0s - loss: 0.2947 - acc: 0.9000 - val_loss: 0.1174 - val_acc: 1.0000\n", | |
"Epoch 90/200\n", | |
"Epoch 00089: val_loss improved from 0.11742 to 0.11190, saving model to best.model\n", | |
"0s - loss: 0.2879 - acc: 0.8889 - val_loss: 0.1119 - val_acc: 1.0000\n", | |
"Epoch 91/200\n", | |
"Epoch 00090: val_loss improved from 0.11190 to 0.10572, saving model to best.model\n", | |
"0s - loss: 0.2919 - acc: 0.9000 - val_loss: 0.1057 - val_acc: 1.0000\n", | |
"Epoch 92/200\n", | |
"Epoch 00091: val_loss improved from 0.10572 to 0.10001, saving model to best.model\n", | |
"0s - loss: 0.3413 - acc: 0.8556 - val_loss: 0.1000 - val_acc: 1.0000\n", | |
"Epoch 93/200\n", | |
"Epoch 00092: val_loss improved from 0.10001 to 0.09468, saving model to best.model\n", | |
"0s - loss: 0.2729 - acc: 0.8889 - val_loss: 0.0947 - val_acc: 1.0000\n", | |
"Epoch 94/200\n", | |
"Epoch 00093: val_loss did not improve\n", | |
"0s - loss: 0.2617 - acc: 0.9111 - val_loss: 0.0958 - val_acc: 1.0000\n", | |
"Epoch 95/200\n", | |
"Epoch 00094: val_loss improved from 0.09468 to 0.09464, saving model to best.model\n", | |
"0s - loss: 0.3397 - acc: 0.8333 - val_loss: 0.0946 - val_acc: 1.0000\n", | |
"Epoch 96/200\n", | |
"Epoch 00095: val_loss did not improve\n", | |
"0s - loss: 0.2371 - acc: 0.9000 - val_loss: 0.0949 - val_acc: 1.0000\n", | |
"Epoch 97/200\n", | |
"Epoch 00096: val_loss improved from 0.09464 to 0.09109, saving model to best.model\n", | |
"0s - loss: 0.3045 - acc: 0.8889 - val_loss: 0.0911 - val_acc: 1.0000\n", | |
"Epoch 98/200\n", | |
"Epoch 00097: val_loss improved from 0.09109 to 0.08593, saving model to best.model\n", | |
"0s - loss: 0.2374 - acc: 0.9111 - val_loss: 0.0859 - val_acc: 1.0000\n", | |
"Epoch 99/200\n", | |
"Epoch 00098: val_loss improved from 0.08593 to 0.07931, saving model to best.model\n", | |
"0s - loss: 0.2779 - acc: 0.8778 - val_loss: 0.0793 - val_acc: 1.0000\n", | |
"Epoch 100/200\n", | |
"Epoch 00099: val_loss improved from 0.07931 to 0.07559, saving model to best.model\n", | |
"0s - loss: 0.2665 - acc: 0.8889 - val_loss: 0.0756 - val_acc: 1.0000\n", | |
"Epoch 101/200\n", | |
"Epoch 00100: val_loss improved from 0.07559 to 0.07473, saving model to best.model\n", | |
"0s - loss: 0.2796 - acc: 0.8778 - val_loss: 0.0747 - val_acc: 1.0000\n", | |
"Epoch 102/200\n", | |
"Epoch 00101: val_loss improved from 0.07473 to 0.06953, saving model to best.model\n", | |
"0s - loss: 0.3135 - acc: 0.8889 - val_loss: 0.0695 - val_acc: 1.0000\n", | |
"Epoch 103/200\n", | |
"Epoch 00102: val_loss improved from 0.06953 to 0.06781, saving model to best.model\n", | |
"0s - loss: 0.2300 - acc: 0.9111 - val_loss: 0.0678 - val_acc: 1.0000\n", | |
"Epoch 104/200\n", | |
"Epoch 00103: val_loss improved from 0.06781 to 0.06362, saving model to best.model\n", | |
"0s - loss: 0.2518 - acc: 0.8889 - val_loss: 0.0636 - val_acc: 1.0000\n", | |
"Epoch 105/200\n", | |
"Epoch 00104: val_loss improved from 0.06362 to 0.05916, saving model to best.model\n", | |
"0s - loss: 0.2041 - acc: 0.9333 - val_loss: 0.0592 - val_acc: 1.0000\n", | |
"Epoch 106/200\n", | |
"Epoch 00105: val_loss improved from 0.05916 to 0.05796, saving model to best.model\n", | |
"0s - loss: 0.2538 - acc: 0.9111 - val_loss: 0.0580 - val_acc: 1.0000\n", | |
"Epoch 107/200\n", | |
"Epoch 00106: val_loss improved from 0.05796 to 0.05709, saving model to best.model\n", | |
"0s - loss: 0.2589 - acc: 0.8667 - val_loss: 0.0571 - val_acc: 1.0000\n", | |
"Epoch 108/200\n", | |
"Epoch 00107: val_loss did not improve\n", | |
"0s - loss: 0.2331 - acc: 0.9111 - val_loss: 0.0577 - val_acc: 1.0000\n", | |
"Epoch 109/200\n", | |
"Epoch 00108: val_loss did not improve\n", | |
"0s - loss: 0.2047 - acc: 0.9222 - val_loss: 0.0586 - val_acc: 1.0000\n", | |
"Epoch 110/200\n", | |
"Epoch 00109: val_loss improved from 0.05709 to 0.05707, saving model to best.model\n", | |
"0s - loss: 0.2169 - acc: 0.9333 - val_loss: 0.0571 - val_acc: 1.0000\n", | |
"Epoch 111/200\n", | |
"Epoch 00110: val_loss improved from 0.05707 to 0.05547, saving model to best.model\n", | |
"0s - loss: 0.2318 - acc: 0.8889 - val_loss: 0.0555 - val_acc: 1.0000\n", | |
"Epoch 112/200\n", | |
"Epoch 00111: val_loss improved from 0.05547 to 0.05255, saving model to best.model\n", | |
"0s - loss: 0.2750 - acc: 0.9000 - val_loss: 0.0526 - val_acc: 1.0000\n", | |
"Epoch 113/200\n", | |
"Epoch 00112: val_loss improved from 0.05255 to 0.04658, saving model to best.model\n", | |
"0s - loss: 0.2177 - acc: 0.9222 - val_loss: 0.0466 - val_acc: 1.0000\n", | |
"Epoch 114/200\n", | |
"Epoch 00113: val_loss improved from 0.04658 to 0.04406, saving model to best.model\n", | |
"0s - loss: 0.2318 - acc: 0.8889 - val_loss: 0.0441 - val_acc: 1.0000\n", | |
"Epoch 115/200\n", | |
"Epoch 00114: val_loss improved from 0.04406 to 0.04065, saving model to best.model\n", | |
"0s - loss: 0.2152 - acc: 0.9000 - val_loss: 0.0407 - val_acc: 1.0000\n", | |
"Epoch 116/200\n", | |
"Epoch 00115: val_loss improved from 0.04065 to 0.03857, saving model to best.model\n", | |
"0s - loss: 0.2711 - acc: 0.9000 - val_loss: 0.0386 - val_acc: 1.0000\n", | |
"Epoch 117/200\n", | |
"Epoch 00116: val_loss improved from 0.03857 to 0.03680, saving model to best.model\n", | |
"0s - loss: 0.2390 - acc: 0.8889 - val_loss: 0.0368 - val_acc: 1.0000\n", | |
"Epoch 118/200\n", | |
"Epoch 00117: val_loss improved from 0.03680 to 0.03556, saving model to best.model\n", | |
"0s - loss: 0.2158 - acc: 0.9222 - val_loss: 0.0356 - val_acc: 1.0000\n", | |
"Epoch 119/200\n", | |
"Epoch 00118: val_loss improved from 0.03556 to 0.03383, saving model to best.model\n", | |
"0s - loss: 0.1831 - acc: 0.9333 - val_loss: 0.0338 - val_acc: 1.0000\n", | |
"Epoch 120/200\n", | |
"Epoch 00119: val_loss improved from 0.03383 to 0.03366, saving model to best.model\n", | |
"0s - loss: 0.1831 - acc: 0.9222 - val_loss: 0.0337 - val_acc: 1.0000\n", | |
"Epoch 121/200\n", | |
"Epoch 00120: val_loss did not improve\n", | |
"0s - loss: 0.2511 - acc: 0.8889 - val_loss: 0.0362 - val_acc: 1.0000\n", | |
"Epoch 122/200\n", | |
"Epoch 00121: val_loss did not improve\n", | |
"0s - loss: 0.1887 - acc: 0.9111 - val_loss: 0.0378 - val_acc: 1.0000\n", | |
"Epoch 123/200\n", | |
"Epoch 00122: val_loss did not improve\n", | |
"0s - loss: 0.2043 - acc: 0.9333 - val_loss: 0.0366 - val_acc: 1.0000\n", | |
"Epoch 124/200\n", | |
"Epoch 00123: val_loss improved from 0.03366 to 0.03214, saving model to best.model\n", | |
"0s - loss: 0.2271 - acc: 0.9111 - val_loss: 0.0321 - val_acc: 1.0000\n", | |
"Epoch 125/200\n", | |
"Epoch 00124: val_loss improved from 0.03214 to 0.02763, saving model to best.model\n", | |
"0s - loss: 0.2327 - acc: 0.9000 - val_loss: 0.0276 - val_acc: 1.0000\n", | |
"Epoch 126/200\n", | |
"Epoch 00125: val_loss improved from 0.02763 to 0.02509, saving model to best.model\n", | |
"0s - loss: 0.1962 - acc: 0.9333 - val_loss: 0.0251 - val_acc: 1.0000\n", | |
"Epoch 127/200\n", | |
"Epoch 00126: val_loss improved from 0.02509 to 0.02277, saving model to best.model\n", | |
"0s - loss: 0.1801 - acc: 0.9667 - val_loss: 0.0228 - val_acc: 1.0000\n", | |
"Epoch 128/200\n", | |
"Epoch 00127: val_loss improved from 0.02277 to 0.02169, saving model to best.model\n", | |
"0s - loss: 0.2340 - acc: 0.9111 - val_loss: 0.0217 - val_acc: 1.0000\n", | |
"Epoch 129/200\n", | |
"Epoch 00128: val_loss did not improve\n", | |
"0s - loss: 0.2265 - acc: 0.9000 - val_loss: 0.0234 - val_acc: 1.0000\n", | |
"Epoch 130/200\n", | |
"Epoch 00129: val_loss did not improve\n", | |
"0s - loss: 0.2156 - acc: 0.9111 - val_loss: 0.0262 - val_acc: 1.0000\n", | |
"Epoch 131/200\n", | |
"Epoch 00130: val_loss did not improve\n", | |
"0s - loss: 0.2128 - acc: 0.9444 - val_loss: 0.0276 - val_acc: 1.0000\n", | |
"Epoch 132/200\n", | |
"Epoch 00131: val_loss did not improve\n", | |
"0s - loss: 0.2165 - acc: 0.9222 - val_loss: 0.0279 - val_acc: 1.0000\n", | |
"Epoch 133/200\n", | |
"Epoch 00132: val_loss did not improve\n", | |
"0s - loss: 0.1654 - acc: 0.9222 - val_loss: 0.0304 - val_acc: 1.0000\n", | |
"Epoch 134/200\n", | |
"Epoch 00133: val_loss did not improve\n", | |
"0s - loss: 0.1637 - acc: 0.9333 - val_loss: 0.0317 - val_acc: 1.0000\n", | |
"Epoch 135/200\n", | |
"Epoch 00134: val_loss did not improve\n", | |
"0s - loss: 0.2024 - acc: 0.9444 - val_loss: 0.0297 - val_acc: 1.0000\n", | |
"Epoch 136/200\n", | |
"Epoch 00135: val_loss did not improve\n", | |
"0s - loss: 0.2085 - acc: 0.9111 - val_loss: 0.0275 - val_acc: 1.0000\n", | |
"Epoch 137/200\n", | |
"Epoch 00136: val_loss did not improve\n", | |
"0s - loss: 0.2651 - acc: 0.8889 - val_loss: 0.0250 - val_acc: 1.0000\n", | |
"Epoch 138/200\n", | |
"Epoch 00137: val_loss did not improve\n", | |
"0s - loss: 0.2585 - acc: 0.8667 - val_loss: 0.0232 - val_acc: 1.0000\n", | |
"Epoch 139/200\n", | |
"Epoch 00138: val_loss improved from 0.02169 to 0.02115, saving model to best.model\n", | |
"0s - loss: 0.1357 - acc: 0.9556 - val_loss: 0.0211 - val_acc: 1.0000\n", | |
"Epoch 140/200\n", | |
"Epoch 00139: val_loss improved from 0.02115 to 0.02025, saving model to best.model\n", | |
"0s - loss: 0.1881 - acc: 0.9111 - val_loss: 0.0203 - val_acc: 1.0000\n", | |
"Epoch 141/200\n", | |
"Epoch 00140: val_loss improved from 0.02025 to 0.01977, saving model to best.model\n", | |
"0s - loss: 0.2593 - acc: 0.9000 - val_loss: 0.0198 - val_acc: 1.0000\n", | |
"Epoch 142/200\n", | |
"Epoch 00141: val_loss did not improve\n", | |
"0s - loss: 0.1581 - acc: 0.9556 - val_loss: 0.0204 - val_acc: 1.0000\n", | |
"Epoch 143/200\n", | |
"Epoch 00142: val_loss did not improve\n", | |
"0s - loss: 0.1754 - acc: 0.9333 - val_loss: 0.0218 - val_acc: 1.0000\n", | |
"Epoch 144/200\n", | |
"Epoch 00143: val_loss did not improve\n", | |
"0s - loss: 0.1656 - acc: 0.9444 - val_loss: 0.0229 - val_acc: 1.0000\n", | |
"Epoch 145/200\n", | |
"Epoch 00144: val_loss did not improve\n", | |
"0s - loss: 0.1535 - acc: 0.9444 - val_loss: 0.0247 - val_acc: 1.0000\n", | |
"Epoch 146/200\n", | |
"Epoch 00145: val_loss did not improve\n", | |
"0s - loss: 0.1440 - acc: 0.9444 - val_loss: 0.0265 - val_acc: 1.0000\n", | |
"Epoch 147/200\n", | |
"Epoch 00146: val_loss did not improve\n", | |
"0s - loss: 0.2065 - acc: 0.9111 - val_loss: 0.0265 - val_acc: 1.0000\n", | |
"Epoch 148/200\n", | |
"Epoch 00147: val_loss did not improve\n", | |
"0s - loss: 0.1956 - acc: 0.9333 - val_loss: 0.0268 - val_acc: 1.0000\n", | |
"Epoch 149/200\n", | |
"Epoch 00148: val_loss did not improve\n", | |
"0s - loss: 0.1758 - acc: 0.9222 - val_loss: 0.0269 - val_acc: 1.0000\n", | |
"Epoch 150/200\n", | |
"Epoch 00149: val_loss did not improve\n", | |
"0s - loss: 0.1812 - acc: 0.9222 - val_loss: 0.0247 - val_acc: 1.0000\n", | |
"Epoch 151/200\n", | |
"Epoch 00150: val_loss did not improve\n", | |
"0s - loss: 0.1916 - acc: 0.9222 - val_loss: 0.0207 - val_acc: 1.0000\n", | |
"Epoch 152/200\n", | |
"Epoch 00151: val_loss improved from 0.01977 to 0.01855, saving model to best.model\n", | |
"0s - loss: 0.1915 - acc: 0.9222 - val_loss: 0.0186 - val_acc: 1.0000\n", | |
"Epoch 153/200\n", | |
"Epoch 00152: val_loss improved from 0.01855 to 0.01841, saving model to best.model\n", | |
"0s - loss: 0.1669 - acc: 0.9222 - val_loss: 0.0184 - val_acc: 1.0000\n", | |
"Epoch 154/200\n", | |
"Epoch 00153: val_loss improved from 0.01841 to 0.01818, saving model to best.model\n", | |
"0s - loss: 0.1722 - acc: 0.9333 - val_loss: 0.0182 - val_acc: 1.0000\n", | |
"Epoch 155/200\n", | |
"Epoch 00154: val_loss did not improve\n", | |
"0s - loss: 0.1658 - acc: 0.9333 - val_loss: 0.0182 - val_acc: 1.0000\n", | |
"Epoch 156/200\n", | |
"Epoch 00155: val_loss improved from 0.01818 to 0.01791, saving model to best.model\n", | |
"0s - loss: 0.1781 - acc: 0.9333 - val_loss: 0.0179 - val_acc: 1.0000\n", | |
"Epoch 157/200\n", | |
"Epoch 00156: val_loss improved from 0.01791 to 0.01778, saving model to best.model\n", | |
"0s - loss: 0.2025 - acc: 0.9222 - val_loss: 0.0178 - val_acc: 1.0000\n", | |
"Epoch 158/200\n", | |
"Epoch 00157: val_loss improved from 0.01778 to 0.01720, saving model to best.model\n", | |
"0s - loss: 0.1719 - acc: 0.9222 - val_loss: 0.0172 - val_acc: 1.0000\n", | |
"Epoch 159/200\n", | |
"Epoch 00158: val_loss improved from 0.01720 to 0.01576, saving model to best.model\n", | |
"0s - loss: 0.1613 - acc: 0.9556 - val_loss: 0.0158 - val_acc: 1.0000\n", | |
"Epoch 160/200\n", | |
"Epoch 00159: val_loss improved from 0.01576 to 0.01504, saving model to best.model\n", | |
"0s - loss: 0.1398 - acc: 0.9333 - val_loss: 0.0150 - val_acc: 1.0000\n", | |
"Epoch 161/200\n", | |
"Epoch 00160: val_loss improved from 0.01504 to 0.01456, saving model to best.model\n", | |
"0s - loss: 0.1481 - acc: 0.9444 - val_loss: 0.0146 - val_acc: 1.0000\n", | |
"Epoch 162/200\n", | |
"Epoch 00161: val_loss improved from 0.01456 to 0.01402, saving model to best.model\n", | |
"0s - loss: 0.1383 - acc: 0.9667 - val_loss: 0.0140 - val_acc: 1.0000\n", | |
"Epoch 163/200\n", | |
"Epoch 00162: val_loss improved from 0.01402 to 0.01338, saving model to best.model\n", | |
"0s - loss: 0.1741 - acc: 0.9222 - val_loss: 0.0134 - val_acc: 1.0000\n", | |
"Epoch 164/200\n", | |
"Epoch 00163: val_loss improved from 0.01338 to 0.01275, saving model to best.model\n", | |
"0s - loss: 0.1079 - acc: 0.9556 - val_loss: 0.0128 - val_acc: 1.0000\n", | |
"Epoch 165/200\n", | |
"Epoch 00164: val_loss improved from 0.01275 to 0.01237, saving model to best.model\n", | |
"0s - loss: 0.1773 - acc: 0.9333 - val_loss: 0.0124 - val_acc: 1.0000\n", | |
"Epoch 166/200\n", | |
"Epoch 00165: val_loss did not improve\n", | |
"0s - loss: 0.1892 - acc: 0.9556 - val_loss: 0.0125 - val_acc: 1.0000\n", | |
"Epoch 167/200\n", | |
"Epoch 00166: val_loss did not improve\n", | |
"0s - loss: 0.2196 - acc: 0.8889 - val_loss: 0.0135 - val_acc: 1.0000\n", | |
"Epoch 168/200\n", | |
"Epoch 00167: val_loss did not improve\n", | |
"0s - loss: 0.1863 - acc: 0.9111 - val_loss: 0.0142 - val_acc: 1.0000\n", | |
"Epoch 169/200\n", | |
"Epoch 00168: val_loss did not improve\n", | |
"0s - loss: 0.1479 - acc: 0.9556 - val_loss: 0.0141 - val_acc: 1.0000\n", | |
"Epoch 170/200\n", | |
"Epoch 00169: val_loss did not improve\n", | |
"0s - loss: 0.1413 - acc: 0.9556 - val_loss: 0.0137 - val_acc: 1.0000\n", | |
"Epoch 171/200\n", | |
"Epoch 00170: val_loss did not improve\n", | |
"0s - loss: 0.1870 - acc: 0.9222 - val_loss: 0.0125 - val_acc: 1.0000\n", | |
"Epoch 172/200\n", | |
"Epoch 00171: val_loss improved from 0.01237 to 0.01130, saving model to best.model\n", | |
"0s - loss: 0.1537 - acc: 0.9333 - val_loss: 0.0113 - val_acc: 1.0000\n", | |
"Epoch 173/200\n", | |
"Epoch 00172: val_loss improved from 0.01130 to 0.00982, saving model to best.model\n", | |
"0s - loss: 0.1611 - acc: 0.9222 - val_loss: 0.0098 - val_acc: 1.0000\n", | |
"Epoch 174/200\n", | |
"Epoch 00173: val_loss improved from 0.00982 to 0.00858, saving model to best.model\n", | |
"0s - loss: 0.1734 - acc: 0.9111 - val_loss: 0.0086 - val_acc: 1.0000\n", | |
"Epoch 175/200\n", | |
"Epoch 00174: val_loss improved from 0.00858 to 0.00811, saving model to best.model\n", | |
"0s - loss: 0.1729 - acc: 0.9222 - val_loss: 0.0081 - val_acc: 1.0000\n", | |
"Epoch 176/200\n", | |
"Epoch 00175: val_loss improved from 0.00811 to 0.00808, saving model to best.model\n", | |
"0s - loss: 0.1608 - acc: 0.9444 - val_loss: 0.0081 - val_acc: 1.0000\n", | |
"Epoch 177/200\n", | |
"Epoch 00176: val_loss did not improve\n", | |
"0s - loss: 0.1541 - acc: 0.9333 - val_loss: 0.0082 - val_acc: 1.0000\n", | |
"Epoch 178/200\n", | |
"Epoch 00177: val_loss did not improve\n", | |
"0s - loss: 0.1893 - acc: 0.9000 - val_loss: 0.0085 - val_acc: 1.0000\n", | |
"Epoch 179/200\n", | |
"Epoch 00178: val_loss did not improve\n", | |
"0s - loss: 0.2072 - acc: 0.8889 - val_loss: 0.0096 - val_acc: 1.0000\n", | |
"Epoch 180/200\n", | |
"Epoch 00179: val_loss did not improve\n", | |
"0s - loss: 0.1522 - acc: 0.9333 - val_loss: 0.0113 - val_acc: 1.0000\n", | |
"Epoch 181/200\n", | |
"Epoch 00180: val_loss did not improve\n", | |
"0s - loss: 0.2118 - acc: 0.9000 - val_loss: 0.0135 - val_acc: 1.0000\n", | |
"Epoch 182/200\n", | |
"Epoch 00181: val_loss did not improve\n", | |
"0s - loss: 0.1731 - acc: 0.9222 - val_loss: 0.0159 - val_acc: 1.0000\n", | |
"Epoch 183/200\n", | |
"Epoch 00182: val_loss did not improve\n", | |
"0s - loss: 0.1377 - acc: 0.9333 - val_loss: 0.0173 - val_acc: 1.0000\n", | |
"Epoch 184/200\n", | |
"Epoch 00183: val_loss did not improve\n", | |
"0s - loss: 0.2300 - acc: 0.9333 - val_loss: 0.0174 - val_acc: 1.0000\n", | |
"Epoch 185/200\n", | |
"Epoch 00184: val_loss did not improve\n", | |
"0s - loss: 0.1670 - acc: 0.9444 - val_loss: 0.0152 - val_acc: 1.0000\n", | |
"Epoch 186/200\n", | |
"Epoch 00185: val_loss did not improve\n", | |
"0s - loss: 0.1442 - acc: 0.9556 - val_loss: 0.0131 - val_acc: 1.0000\n", | |
"Epoch 187/200\n", | |
"Epoch 00186: val_loss did not improve\n", | |
"0s - loss: 0.2044 - acc: 0.9444 - val_loss: 0.0107 - val_acc: 1.0000\n", | |
"Epoch 188/200\n", | |
"Epoch 00187: val_loss did not improve\n", | |
"0s - loss: 0.1881 - acc: 0.9111 - val_loss: 0.0090 - val_acc: 1.0000\n", | |
"Epoch 189/200\n", | |
"Epoch 00188: val_loss improved from 0.00808 to 0.00764, saving model to best.model\n", | |
"0s - loss: 0.2339 - acc: 0.9333 - val_loss: 0.0076 - val_acc: 1.0000\n", | |
"Epoch 190/200\n", | |
"Epoch 00189: val_loss improved from 0.00764 to 0.00720, saving model to best.model\n", | |
"0s - loss: 0.2333 - acc: 0.8778 - val_loss: 0.0072 - val_acc: 1.0000\n", | |
"Epoch 191/200\n", | |
"Epoch 00190: val_loss improved from 0.00720 to 0.00719, saving model to best.model\n", | |
"0s - loss: 0.1509 - acc: 0.9222 - val_loss: 0.0072 - val_acc: 1.0000\n", | |
"Epoch 192/200\n", | |
"Epoch 00191: val_loss did not improve\n", | |
"0s - loss: 0.1375 - acc: 0.9222 - val_loss: 0.0073 - val_acc: 1.0000\n", | |
"Epoch 193/200\n", | |
"Epoch 00192: val_loss did not improve\n", | |
"0s - loss: 0.1452 - acc: 0.9333 - val_loss: 0.0076 - val_acc: 1.0000\n", | |
"Epoch 194/200\n", | |
"Epoch 00193: val_loss did not improve\n", | |
"0s - loss: 0.1451 - acc: 0.9222 - val_loss: 0.0081 - val_acc: 1.0000\n", | |
"Epoch 195/200\n", | |
"Epoch 00194: val_loss did not improve\n", | |
"0s - loss: 0.1357 - acc: 0.9556 - val_loss: 0.0089 - val_acc: 1.0000\n", | |
"Epoch 196/200\n", | |
"Epoch 00195: val_loss did not improve\n", | |
"0s - loss: 0.1554 - acc: 0.9222 - val_loss: 0.0100 - val_acc: 1.0000\n", | |
"Epoch 197/200\n", | |
"Epoch 00196: val_loss did not improve\n", | |
"0s - loss: 0.1492 - acc: 0.9333 - val_loss: 0.0115 - val_acc: 1.0000\n", | |
"Epoch 198/200\n", | |
"Epoch 00197: val_loss did not improve\n", | |
"0s - loss: 0.1160 - acc: 0.9444 - val_loss: 0.0126 - val_acc: 1.0000\n", | |
"Epoch 199/200\n", | |
"Epoch 00198: val_loss did not improve\n", | |
"0s - loss: 0.1582 - acc: 0.9222 - val_loss: 0.0130 - val_acc: 1.0000\n", | |
"Epoch 200/200\n", | |
"Epoch 00199: val_loss did not improve\n", | |
"0s - loss: 0.1966 - acc: 0.9222 - val_loss: 0.0118 - val_acc: 1.0000\n" | |
] | |
}, | |
{ | |
"data": { | |
"text/plain": [ | |
"<keras.callbacks.History at 0x11d31bdd0>" | |
] | |
}, | |
"execution_count": 8, | |
"metadata": {}, | |
"output_type": "execute_result" | |
} | |
], | |
"source": [ | |
"m.fit(\n", | |
" # Feature matrix\n", | |
" X_train, \n", | |
" # Target class one-hot-encoded\n", | |
" pd.get_dummies(pd.DataFrame(y_train), columns=[0]).as_matrix(),\n", | |
" # Iterations to be run if not stopped by EarlyStopping\n", | |
" epochs=200, \n", | |
" callbacks=[\n", | |
" # Stop iterations when validation loss has not improved\n", | |
" EarlyStopping(monitor='val_loss', patience=25),\n", | |
" # Nice for keeping the last model before overfitting occurs\n", | |
" ModelCheckpoint(\n", | |
" 'best.model', \n", | |
" monitor='val_loss',\n", | |
" save_best_only=True,\n", | |
" verbose=1\n", | |
" )\n", | |
" ],\n", | |
" verbose=2,\n", | |
" validation_split=0.1,\n", | |
" batch_size=256, \n", | |
")" | |
] | |
}, | |
{ | |
"cell_type": "code", | |
"execution_count": 9, | |
"metadata": { | |
"collapsed": false | |
}, | |
"outputs": [], | |
"source": [ | |
"# Load the best model\n", | |
"m.load_weights(\"best.model\")\n", | |
"\n", | |
"# Keep track of what class corresponds to what index\n", | |
"mapping = (\n", | |
" pd.get_dummies(pd.DataFrame(y_train), columns=[0], prefix='', prefix_sep='')\n", | |
" .columns.astype(int).values\n", | |
")\n", | |
"y_test_preds = [mapping[pred] for pred in m.predict(X_test).argmax(axis=1)]" | |
] | |
}, | |
{ | |
"cell_type": "code", | |
"execution_count": 10, | |
"metadata": { | |
"collapsed": false | |
}, | |
"outputs": [ | |
{ | |
"data": { | |
"text/html": [ | |
"<div>\n", | |
"<style>\n", | |
" .dataframe thead tr:only-child th {\n", | |
" text-align: right;\n", | |
" }\n", | |
"\n", | |
" .dataframe thead th {\n", | |
" text-align: left;\n", | |
" }\n", | |
"\n", | |
" .dataframe tbody tr th {\n", | |
" vertical-align: top;\n", | |
" }\n", | |
"</style>\n", | |
"<table border=\"1\" class=\"dataframe\">\n", | |
" <thead>\n", | |
" <tr style=\"text-align: right;\">\n", | |
" <th>Predicted</th>\n", | |
" <th>0</th>\n", | |
" <th>1</th>\n", | |
" <th>2</th>\n", | |
" <th>All</th>\n", | |
" </tr>\n", | |
" <tr>\n", | |
" <th>Actual</th>\n", | |
" <th></th>\n", | |
" <th></th>\n", | |
" <th></th>\n", | |
" <th></th>\n", | |
" </tr>\n", | |
" </thead>\n", | |
" <tbody>\n", | |
" <tr>\n", | |
" <th>0</th>\n", | |
" <td>18</td>\n", | |
" <td>0</td>\n", | |
" <td>0</td>\n", | |
" <td>18</td>\n", | |
" </tr>\n", | |
" <tr>\n", | |
" <th>1</th>\n", | |
" <td>0</td>\n", | |
" <td>16</td>\n", | |
" <td>0</td>\n", | |
" <td>16</td>\n", | |
" </tr>\n", | |
" <tr>\n", | |
" <th>2</th>\n", | |
" <td>0</td>\n", | |
" <td>0</td>\n", | |
" <td>16</td>\n", | |
" <td>16</td>\n", | |
" </tr>\n", | |
" <tr>\n", | |
" <th>All</th>\n", | |
" <td>18</td>\n", | |
" <td>16</td>\n", | |
" <td>16</td>\n", | |
" <td>50</td>\n", | |
" </tr>\n", | |
" </tbody>\n", | |
"</table>\n", | |
"</div>" | |
], | |
"text/plain": [ | |
"Predicted 0 1 2 All\n", | |
"Actual \n", | |
"0 18 0 0 18\n", | |
"1 0 16 0 16\n", | |
"2 0 0 16 16\n", | |
"All 18 16 16 50" | |
] | |
}, | |
"execution_count": 10, | |
"metadata": {}, | |
"output_type": "execute_result" | |
} | |
], | |
"source": [ | |
"pd.crosstab(\n", | |
" pd.Series(y_test, name='Actual'),\n", | |
" pd.Series(y_test_preds, name='Predicted'),\n", | |
" margins=True\n", | |
")" | |
] | |
}, | |
{ | |
"cell_type": "code", | |
"execution_count": 11, | |
"metadata": { | |
"collapsed": false | |
}, | |
"outputs": [ | |
{ | |
"name": "stdout", | |
"output_type": "stream", | |
"text": [ | |
"Accuracy: 1.000\n" | |
] | |
} | |
], | |
"source": [ | |
"print 'Accuracy: {0:.3f}'.format(accuracy_score(y_test, y_test_preds))" | |
] | |
}, | |
{ | |
"cell_type": "code", | |
"execution_count": 18, | |
"metadata": { | |
"collapsed": false | |
}, | |
"outputs": [ | |
{ | |
"data": { | |
"text/html": [ | |
"<div>\n", | |
"<style>\n", | |
" .dataframe thead tr:only-child th {\n", | |
" text-align: right;\n", | |
" }\n", | |
"\n", | |
" .dataframe thead th {\n", | |
" text-align: left;\n", | |
" }\n", | |
"\n", | |
" .dataframe tbody tr th {\n", | |
" vertical-align: top;\n", | |
" }\n", | |
"</style>\n", | |
"<table border=\"1\" class=\"dataframe\">\n", | |
" <thead>\n", | |
" <tr style=\"text-align: right;\">\n", | |
" <th>Predicted</th>\n", | |
" <th>0</th>\n", | |
" <th>1</th>\n", | |
" <th>2</th>\n", | |
" <th>All</th>\n", | |
" </tr>\n", | |
" <tr>\n", | |
" <th>Actual</th>\n", | |
" <th></th>\n", | |
" <th></th>\n", | |
" <th></th>\n", | |
" <th></th>\n", | |
" </tr>\n", | |
" </thead>\n", | |
" <tbody>\n", | |
" <tr>\n", | |
" <th>0</th>\n", | |
" <td>18</td>\n", | |
" <td>0</td>\n", | |
" <td>0</td>\n", | |
" <td>18</td>\n", | |
" </tr>\n", | |
" <tr>\n", | |
" <th>1</th>\n", | |
" <td>0</td>\n", | |
" <td>15</td>\n", | |
" <td>1</td>\n", | |
" <td>16</td>\n", | |
" </tr>\n", | |
" <tr>\n", | |
" <th>2</th>\n", | |
" <td>0</td>\n", | |
" <td>2</td>\n", | |
" <td>14</td>\n", | |
" <td>16</td>\n", | |
" </tr>\n", | |
" <tr>\n", | |
" <th>All</th>\n", | |
" <td>18</td>\n", | |
" <td>17</td>\n", | |
" <td>15</td>\n", | |
" <td>50</td>\n", | |
" </tr>\n", | |
" </tbody>\n", | |
"</table>\n", | |
"</div>" | |
], | |
"text/plain": [ | |
"Predicted 0 1 2 All\n", | |
"Actual \n", | |
"0 18 0 0 18\n", | |
"1 0 15 1 16\n", | |
"2 0 2 14 16\n", | |
"All 18 17 15 50" | |
] | |
}, | |
"execution_count": 18, | |
"metadata": {}, | |
"output_type": "execute_result" | |
} | |
], | |
"source": [ | |
"from xgboost.sklearn import XGBClassifier\n", | |
"from sklearn.model_selection import GridSearchCV\n", | |
"\n", | |
"params_fixed = {\n", | |
" 'objective': 'binary:logistic',\n", | |
" 'silent': 1,\n", | |
" 'seed': seed,\n", | |
"}\n", | |
"\n", | |
"space = {\n", | |
" 'max_depth': [2, 3, 5],\n", | |
" 'learning_rate': [10**-4, 10**-3, 10**-2, 10**-1],\n", | |
" 'n_estimators': [1000], \n", | |
" 'min_child_weight': [1, 5, 20]\n", | |
"}\n", | |
"\n", | |
"\n", | |
"clf = GridSearchCV(XGBClassifier(**params_fixed), space)\n", | |
"clf.fit(X_train, y_train)\n", | |
"y_test_preds = clf.predict(X_test)\n", | |
"\n", | |
"pd.crosstab(\n", | |
" pd.Series(y_test, name='Actual'),\n", | |
" pd.Series(y_test_preds, name='Predicted'),\n", | |
" margins=True\n", | |
")" | |
] | |
}, | |
{ | |
"cell_type": "code", | |
"execution_count": 19, | |
"metadata": { | |
"collapsed": false | |
}, | |
"outputs": [ | |
{ | |
"name": "stdout", | |
"output_type": "stream", | |
"text": [ | |
"Accuracy: 0.940\n" | |
] | |
} | |
], | |
"source": [ | |
"print 'Accuracy: {0:.3f}'.format(accuracy_score(y_test, y_test_preds))" | |
] | |
}, | |
{ | |
"cell_type": "code", | |
"execution_count": 14, | |
"metadata": { | |
"collapsed": true | |
}, | |
"outputs": [], | |
"source": [ | |
"def compare_on_dataset(data, target_variable=None, lr=0.001, patience=150):\n", | |
" \n", | |
" from IPython.display import display\n", | |
" \n", | |
" df = (\n", | |
" pd.read_csv(data)\n", | |
"\n", | |
" # Rename columns to lowercase and underscores\n", | |
" .pipe(lambda d: d.rename(columns={\n", | |
" k: v for k, v in zip(\n", | |
" d.columns, \n", | |
" [c.lower().replace(' ', '_') for c in d.columns]\n", | |
" )\n", | |
" }))\n", | |
" # Switch categorical classes to integers\n", | |
" .assign(**{target_variable: lambda r: r[target_variable].astype('category').cat.codes})\n", | |
" .pipe(lambda d: pd.get_dummies(d))\n", | |
" )\n", | |
"\n", | |
" y = df[target_variable].values\n", | |
" X = (\n", | |
" # Drop target variable\n", | |
" df.drop(target_variable, axis=1)\n", | |
" # Min-max-scaling (only needed for the DL model)\n", | |
" .pipe(lambda d: (d-d.min())/d.max()).fillna(0)\n", | |
" .as_matrix()\n", | |
" )\n", | |
"\n", | |
" X_train, X_test, y_train, y_test = train_test_split(\n", | |
" X, y, test_size=0.33, random_state=seed\n", | |
" )\n", | |
"\n", | |
" m = Sequential()\n", | |
" m.add(Dense(128, activation='relu', input_shape=(X.shape[1],)))\n", | |
" m.add(Dropout(0.5))\n", | |
" m.add(Dense(128, activation='relu'))\n", | |
" m.add(Dropout(0.5))\n", | |
" m.add(Dense(128, activation='relu'))\n", | |
" m.add(Dropout(0.5))\n", | |
" m.add(Dense(len(np.unique(y)), activation='softmax'))\n", | |
"\n", | |
" m.compile(\n", | |
" optimizer=optimizers.Adam(lr=lr),\n", | |
" loss='categorical_crossentropy',\n", | |
" metrics=['accuracy']\n", | |
" )\n", | |
"\n", | |
" m.fit(\n", | |
" # Feature matrix\n", | |
" X_train, \n", | |
" # Target class one-hot-encoded\n", | |
" pd.get_dummies(pd.DataFrame(y_train), columns=[0]).as_matrix(),\n", | |
" # Iterations to be run if not stopped by EarlyStopping\n", | |
" epochs=200, \n", | |
" callbacks=[\n", | |
" EarlyStopping(monitor='val_loss', patience=patience),\n", | |
" ModelCheckpoint(\n", | |
" 'best.model', \n", | |
" monitor='val_loss',\n", | |
" save_best_only=True,\n", | |
" verbose=1\n", | |
" )\n", | |
" ],\n", | |
" verbose=2,\n", | |
" validation_split=0.1,\n", | |
" batch_size=256, \n", | |
" )\n", | |
"\n", | |
" # Keep track of what class corresponds to what index\n", | |
" mapping = (\n", | |
" pd.get_dummies(pd.DataFrame(y_train), columns=[0], prefix='', prefix_sep='')\n", | |
" .columns.astype(int).values\n", | |
" )\n", | |
" \n", | |
" # Load the best model\n", | |
" m.load_weights(\"best.model\")\n", | |
" y_test_preds = [mapping[pred] for pred in m.predict(X_test).argmax(axis=1)]\n", | |
"\n", | |
" print 'Three layer deep neural net'\n", | |
" display(pd.crosstab(\n", | |
" pd.Series(y_test, name='Actual'),\n", | |
" pd.Series(y_test_preds, name='Predicted'),\n", | |
" margins=True\n", | |
" ))\n", | |
"\n", | |
" print 'Accuracy: {0:.3f}'.format(accuracy_score(y_test, y_test_preds)) \n", | |
" boostrap_stats_samples = [\n", | |
" np.random.choice((y_test == y_test_preds), size=int(len(y_test)*.5)).mean() \n", | |
" for _ in range(10000)\n", | |
" ]\n", | |
" print 'Boostrapped accuracy 95 % interval', np.percentile(boostrap_stats_samples, 5), np.percentile(boostrap_stats_samples, 95)\n", | |
"\n", | |
" params_fixed = {\n", | |
" 'objective': 'binary:logistic',\n", | |
" 'silent': 1,\n", | |
" 'seed': seed,\n", | |
" }\n", | |
"\n", | |
" space = {\n", | |
" 'max_depth': [2, 3, 5],\n", | |
" 'learning_rate': [10**-4, 10**-3, 10**-2, 10**-1],\n", | |
" 'n_estimators': [1000], \n", | |
" 'min_child_weight': [1, 5, 20]\n", | |
" }\n", | |
"\n", | |
"\n", | |
" clf = GridSearchCV(XGBClassifier(**params_fixed), space)\n", | |
" clf.fit(X_train, y_train)\n", | |
" y_test_preds = clf.predict(X_test)\n", | |
" \n", | |
" print ''\n", | |
" print 'Xgboost'\n", | |
" display(pd.crosstab(\n", | |
" pd.Series(y_test, name='Actual'),\n", | |
" pd.Series(y_test_preds, name='Predicted'),\n", | |
" margins=True\n", | |
" ))\n", | |
" print 'Accuracy: {0:.3f}'.format(accuracy_score(y_test, y_test_preds))\n", | |
" boostrap_stats_samples = [\n", | |
" np.random.choice((y_test == y_test_preds), size=int(len(y_test)*.5)).mean() \n", | |
" for _ in range(10000)\n", | |
" ]\n", | |
" print 'Boostrapped accuracy 95 % interval', np.percentile(boostrap_stats_samples, 5), '-', np.percentile(boostrap_stats_samples, 95)" | |
] | |
}, | |
{ | |
"cell_type": "markdown", | |
"metadata": {}, | |
"source": [ | |
"## Three class wine dataset (n=59)" | |
] | |
}, | |
{ | |
"cell_type": "code", | |
"execution_count": 15, | |
"metadata": { | |
"collapsed": false | |
}, | |
"outputs": [ | |
{ | |
"name": "stdout", | |
"output_type": "stream", | |
"text": [ | |
"Train on 107 samples, validate on 12 samples\n", | |
"Epoch 1/200\n", | |
"Epoch 00000: val_loss improved from inf to 1.06694, saving model to best.model\n", | |
"0s - loss: 1.1149 - acc: 0.3178 - val_loss: 1.0669 - val_acc: 0.6667\n", | |
"Epoch 2/200\n", | |
"Epoch 00001: val_loss improved from 1.06694 to 1.05808, saving model to best.model\n", | |
"0s - loss: 1.0858 - acc: 0.3738 - val_loss: 1.0581 - val_acc: 0.6667\n", | |
"Epoch 3/200\n", | |
"Epoch 00002: val_loss improved from 1.05808 to 1.05044, saving model to best.model\n", | |
"0s - loss: 1.1158 - acc: 0.2897 - val_loss: 1.0504 - val_acc: 0.9167\n", | |
"Epoch 4/200\n", | |
"Epoch 00003: val_loss improved from 1.05044 to 1.04367, saving model to best.model\n", | |
"0s - loss: 1.0716 - acc: 0.4486 - val_loss: 1.0437 - val_acc: 0.8333\n", | |
"Epoch 5/200\n", | |
"Epoch 00004: val_loss improved from 1.04367 to 1.03652, saving model to best.model\n", | |
"0s - loss: 1.0580 - acc: 0.4299 - val_loss: 1.0365 - val_acc: 0.8333\n", | |
"Epoch 6/200\n", | |
"Epoch 00005: val_loss improved from 1.03652 to 1.02894, saving model to best.model\n", | |
"0s - loss: 1.0450 - acc: 0.4673 - val_loss: 1.0289 - val_acc: 0.8333\n", | |
"Epoch 7/200\n", | |
"Epoch 00006: val_loss improved from 1.02894 to 1.02116, saving model to best.model\n", | |
"0s - loss: 1.0779 - acc: 0.3925 - val_loss: 1.0212 - val_acc: 0.7500\n", | |
"Epoch 8/200\n", | |
"Epoch 00007: val_loss improved from 1.02116 to 1.01342, saving model to best.model\n", | |
"0s - loss: 1.0869 - acc: 0.3458 - val_loss: 1.0134 - val_acc: 0.5000\n", | |
"Epoch 9/200\n", | |
"Epoch 00008: val_loss improved from 1.01342 to 1.00554, saving model to best.model\n", | |
"0s - loss: 1.0436 - acc: 0.4860 - val_loss: 1.0055 - val_acc: 0.5000\n", | |
"Epoch 10/200\n", | |
"Epoch 00009: val_loss improved from 1.00554 to 0.99617, saving model to best.model\n", | |
"0s - loss: 1.0244 - acc: 0.4673 - val_loss: 0.9962 - val_acc: 0.5000\n", | |
"Epoch 11/200\n", | |
"Epoch 00010: val_loss improved from 0.99617 to 0.98640, saving model to best.model\n", | |
"0s - loss: 1.0343 - acc: 0.4579 - val_loss: 0.9864 - val_acc: 0.5833\n", | |
"Epoch 12/200\n", | |
"Epoch 00011: val_loss improved from 0.98640 to 0.97581, saving model to best.model\n", | |
"0s - loss: 1.0193 - acc: 0.5140 - val_loss: 0.9758 - val_acc: 0.6667\n", | |
"Epoch 13/200\n", | |
"Epoch 00012: val_loss improved from 0.97581 to 0.96341, saving model to best.model\n", | |
"0s - loss: 0.9834 - acc: 0.5888 - val_loss: 0.9634 - val_acc: 0.7500\n", | |
"Epoch 14/200\n", | |
"Epoch 00013: val_loss improved from 0.96341 to 0.94999, saving model to best.model\n", | |
"0s - loss: 1.0199 - acc: 0.5047 - val_loss: 0.9500 - val_acc: 0.7500\n", | |
"Epoch 15/200\n", | |
"Epoch 00014: val_loss improved from 0.94999 to 0.93551, saving model to best.model\n", | |
"0s - loss: 1.0069 - acc: 0.5047 - val_loss: 0.9355 - val_acc: 0.7500\n", | |
"Epoch 16/200\n", | |
"Epoch 00015: val_loss improved from 0.93551 to 0.92070, saving model to best.model\n", | |
"0s - loss: 0.9755 - acc: 0.5701 - val_loss: 0.9207 - val_acc: 0.8333\n", | |
"Epoch 17/200\n", | |
"Epoch 00016: val_loss improved from 0.92070 to 0.90541, saving model to best.model\n", | |
"0s - loss: 0.9725 - acc: 0.5701 - val_loss: 0.9054 - val_acc: 0.9167\n", | |
"Epoch 18/200\n", | |
"Epoch 00017: val_loss improved from 0.90541 to 0.88907, saving model to best.model\n", | |
"0s - loss: 0.9676 - acc: 0.5888 - val_loss: 0.8891 - val_acc: 0.9167\n", | |
"Epoch 19/200\n", | |
"Epoch 00018: val_loss improved from 0.88907 to 0.87130, saving model to best.model\n", | |
"0s - loss: 0.9641 - acc: 0.6168 - val_loss: 0.8713 - val_acc: 0.9167\n", | |
"Epoch 20/200\n", | |
"Epoch 00019: val_loss improved from 0.87130 to 0.85264, saving model to best.model\n", | |
"0s - loss: 0.9417 - acc: 0.5981 - val_loss: 0.8526 - val_acc: 0.9167\n", | |
"Epoch 21/200\n", | |
"Epoch 00020: val_loss improved from 0.85264 to 0.83382, saving model to best.model\n", | |
"0s - loss: 0.9418 - acc: 0.5981 - val_loss: 0.8338 - val_acc: 0.9167\n", | |
"Epoch 22/200\n", | |
"Epoch 00021: val_loss improved from 0.83382 to 0.81417, saving model to best.model\n", | |
"0s - loss: 0.9171 - acc: 0.6822 - val_loss: 0.8142 - val_acc: 0.9167\n", | |
"Epoch 23/200\n", | |
"Epoch 00022: val_loss improved from 0.81417 to 0.79399, saving model to best.model\n", | |
"0s - loss: 0.9266 - acc: 0.6168 - val_loss: 0.7940 - val_acc: 0.9167\n", | |
"Epoch 24/200\n", | |
"Epoch 00023: val_loss improved from 0.79399 to 0.77285, saving model to best.model\n", | |
"0s - loss: 0.8594 - acc: 0.7103 - val_loss: 0.7729 - val_acc: 0.9167\n", | |
"Epoch 25/200\n", | |
"Epoch 00024: val_loss improved from 0.77285 to 0.75021, saving model to best.model\n", | |
"0s - loss: 0.8650 - acc: 0.6542 - val_loss: 0.7502 - val_acc: 0.9167\n", | |
"Epoch 26/200\n", | |
"Epoch 00025: val_loss improved from 0.75021 to 0.72741, saving model to best.model\n", | |
"0s - loss: 0.8475 - acc: 0.7103 - val_loss: 0.7274 - val_acc: 0.9167\n", | |
"Epoch 27/200\n", | |
"Epoch 00026: val_loss improved from 0.72741 to 0.70398, saving model to best.model\n", | |
"0s - loss: 0.8345 - acc: 0.6542 - val_loss: 0.7040 - val_acc: 0.9167\n", | |
"Epoch 28/200\n", | |
"Epoch 00027: val_loss improved from 0.70398 to 0.68054, saving model to best.model\n", | |
"0s - loss: 0.8077 - acc: 0.7290 - val_loss: 0.6805 - val_acc: 0.9167\n", | |
"Epoch 29/200\n", | |
"Epoch 00028: val_loss improved from 0.68054 to 0.65715, saving model to best.model\n", | |
"0s - loss: 0.8347 - acc: 0.7290 - val_loss: 0.6571 - val_acc: 0.9167\n", | |
"Epoch 30/200\n", | |
"Epoch 00029: val_loss improved from 0.65715 to 0.63391, saving model to best.model\n", | |
"0s - loss: 0.7884 - acc: 0.6916 - val_loss: 0.6339 - val_acc: 0.9167\n", | |
"Epoch 31/200\n", | |
"Epoch 00030: val_loss improved from 0.63391 to 0.61063, saving model to best.model\n", | |
"0s - loss: 0.7564 - acc: 0.7570 - val_loss: 0.6106 - val_acc: 0.9167\n", | |
"Epoch 32/200\n", | |
"Epoch 00031: val_loss improved from 0.61063 to 0.58730, saving model to best.model\n", | |
"0s - loss: 0.7337 - acc: 0.7944 - val_loss: 0.5873 - val_acc: 0.9167\n", | |
"Epoch 33/200\n", | |
"Epoch 00032: val_loss improved from 0.58730 to 0.56331, saving model to best.model\n", | |
"0s - loss: 0.7361 - acc: 0.7477 - val_loss: 0.5633 - val_acc: 0.9167\n", | |
"Epoch 34/200\n", | |
"Epoch 00033: val_loss improved from 0.56331 to 0.53955, saving model to best.model\n", | |
"0s - loss: 0.7499 - acc: 0.7383 - val_loss: 0.5396 - val_acc: 0.9167\n", | |
"Epoch 35/200\n", | |
"Epoch 00034: val_loss improved from 0.53955 to 0.51575, saving model to best.model\n", | |
"0s - loss: 0.6865 - acc: 0.7477 - val_loss: 0.5157 - val_acc: 0.9167\n", | |
"Epoch 36/200\n", | |
"Epoch 00035: val_loss improved from 0.51575 to 0.49275, saving model to best.model\n", | |
"0s - loss: 0.7152 - acc: 0.7196 - val_loss: 0.4927 - val_acc: 0.9167\n", | |
"Epoch 37/200\n", | |
"Epoch 00036: val_loss improved from 0.49275 to 0.47055, saving model to best.model\n", | |
"0s - loss: 0.6378 - acc: 0.7664 - val_loss: 0.4705 - val_acc: 0.9167\n", | |
"Epoch 38/200\n", | |
"Epoch 00037: val_loss improved from 0.47055 to 0.44808, saving model to best.model\n", | |
"0s - loss: 0.6631 - acc: 0.7664 - val_loss: 0.4481 - val_acc: 0.9167\n", | |
"Epoch 39/200\n", | |
"Epoch 00038: val_loss improved from 0.44808 to 0.42568, saving model to best.model\n", | |
"0s - loss: 0.6182 - acc: 0.7477 - val_loss: 0.4257 - val_acc: 0.9167\n", | |
"Epoch 40/200\n", | |
"Epoch 00039: val_loss improved from 0.42568 to 0.40521, saving model to best.model\n", | |
"0s - loss: 0.5979 - acc: 0.7944 - val_loss: 0.4052 - val_acc: 0.9167\n", | |
"Epoch 41/200\n", | |
"Epoch 00040: val_loss improved from 0.40521 to 0.38437, saving model to best.model\n", | |
"0s - loss: 0.5975 - acc: 0.7850 - val_loss: 0.3844 - val_acc: 0.9167\n", | |
"Epoch 42/200\n", | |
"Epoch 00041: val_loss improved from 0.38437 to 0.36491, saving model to best.model\n", | |
"0s - loss: 0.5154 - acc: 0.8224 - val_loss: 0.3649 - val_acc: 0.9167\n", | |
"Epoch 43/200\n", | |
"Epoch 00042: val_loss improved from 0.36491 to 0.34659, saving model to best.model\n", | |
"0s - loss: 0.5505 - acc: 0.8131 - val_loss: 0.3466 - val_acc: 0.9167\n", | |
"Epoch 44/200\n", | |
"Epoch 00043: val_loss improved from 0.34659 to 0.33125, saving model to best.model\n", | |
"0s - loss: 0.4891 - acc: 0.8505 - val_loss: 0.3312 - val_acc: 0.9167\n", | |
"Epoch 45/200\n", | |
"Epoch 00044: val_loss improved from 0.33125 to 0.31587, saving model to best.model\n", | |
"0s - loss: 0.5540 - acc: 0.8037 - val_loss: 0.3159 - val_acc: 0.9167\n", | |
"Epoch 46/200\n", | |
"Epoch 00045: val_loss improved from 0.31587 to 0.29899, saving model to best.model\n", | |
"0s - loss: 0.5366 - acc: 0.7944 - val_loss: 0.2990 - val_acc: 0.9167\n", | |
"Epoch 47/200\n", | |
"Epoch 00046: val_loss improved from 0.29899 to 0.28378, saving model to best.model\n", | |
"0s - loss: 0.4772 - acc: 0.8692 - val_loss: 0.2838 - val_acc: 0.9167\n", | |
"Epoch 48/200\n", | |
"Epoch 00047: val_loss improved from 0.28378 to 0.26878, saving model to best.model\n", | |
"0s - loss: 0.4042 - acc: 0.8879 - val_loss: 0.2688 - val_acc: 0.9167\n", | |
"Epoch 49/200\n", | |
"Epoch 00048: val_loss improved from 0.26878 to 0.25491, saving model to best.model\n", | |
"0s - loss: 0.4471 - acc: 0.8411 - val_loss: 0.2549 - val_acc: 0.9167\n", | |
"Epoch 50/200\n", | |
"Epoch 00049: val_loss improved from 0.25491 to 0.24067, saving model to best.model\n", | |
"0s - loss: 0.4305 - acc: 0.8598 - val_loss: 0.2407 - val_acc: 0.9167\n", | |
"Epoch 51/200\n", | |
"Epoch 00050: val_loss improved from 0.24067 to 0.22862, saving model to best.model\n", | |
"0s - loss: 0.3773 - acc: 0.8972 - val_loss: 0.2286 - val_acc: 1.0000\n", | |
"Epoch 52/200\n", | |
"Epoch 00051: val_loss improved from 0.22862 to 0.21962, saving model to best.model\n", | |
"0s - loss: 0.4445 - acc: 0.8411 - val_loss: 0.2196 - val_acc: 0.9167\n", | |
"Epoch 53/200\n", | |
"Epoch 00052: val_loss improved from 0.21962 to 0.21341, saving model to best.model\n", | |
"0s - loss: 0.4403 - acc: 0.7850 - val_loss: 0.2134 - val_acc: 0.9167\n", | |
"Epoch 54/200\n", | |
"Epoch 00053: val_loss improved from 0.21341 to 0.20756, saving model to best.model\n", | |
"0s - loss: 0.3584 - acc: 0.8785 - val_loss: 0.2076 - val_acc: 0.9167\n", | |
"Epoch 55/200\n", | |
"Epoch 00054: val_loss improved from 0.20756 to 0.20240, saving model to best.model\n", | |
"0s - loss: 0.3580 - acc: 0.8785 - val_loss: 0.2024 - val_acc: 0.9167\n", | |
"Epoch 56/200\n", | |
"Epoch 00055: val_loss improved from 0.20240 to 0.19546, saving model to best.model\n", | |
"0s - loss: 0.3395 - acc: 0.9252 - val_loss: 0.1955 - val_acc: 0.9167\n", | |
"Epoch 57/200\n", | |
"Epoch 00056: val_loss improved from 0.19546 to 0.18614, saving model to best.model\n", | |
"0s - loss: 0.3354 - acc: 0.9065 - val_loss: 0.1861 - val_acc: 0.9167\n", | |
"Epoch 58/200\n", | |
"Epoch 00057: val_loss improved from 0.18614 to 0.17385, saving model to best.model\n", | |
"0s - loss: 0.3956 - acc: 0.8785 - val_loss: 0.1739 - val_acc: 0.9167\n", | |
"Epoch 59/200\n", | |
"Epoch 00058: val_loss improved from 0.17385 to 0.16126, saving model to best.model\n", | |
"0s - loss: 0.3365 - acc: 0.9065 - val_loss: 0.1613 - val_acc: 1.0000\n", | |
"Epoch 60/200\n", | |
"Epoch 00059: val_loss improved from 0.16126 to 0.15117, saving model to best.model\n", | |
"0s - loss: 0.3204 - acc: 0.8972 - val_loss: 0.1512 - val_acc: 1.0000\n", | |
"Epoch 61/200\n", | |
"Epoch 00060: val_loss improved from 0.15117 to 0.13855, saving model to best.model\n", | |
"0s - loss: 0.2795 - acc: 0.9159 - val_loss: 0.1385 - val_acc: 1.0000\n", | |
"Epoch 62/200\n", | |
"Epoch 00061: val_loss improved from 0.13855 to 0.13196, saving model to best.model\n", | |
"0s - loss: 0.3060 - acc: 0.8879 - val_loss: 0.1320 - val_acc: 1.0000\n", | |
"Epoch 63/200\n", | |
"Epoch 00062: val_loss improved from 0.13196 to 0.13196, saving model to best.model\n", | |
"0s - loss: 0.3032 - acc: 0.9065 - val_loss: 0.1320 - val_acc: 1.0000\n", | |
"Epoch 64/200\n", | |
"Epoch 00063: val_loss did not improve\n", | |
"0s - loss: 0.2903 - acc: 0.8972 - val_loss: 0.1344 - val_acc: 1.0000\n", | |
"Epoch 65/200\n", | |
"Epoch 00064: val_loss did not improve\n", | |
"0s - loss: 0.2965 - acc: 0.8972 - val_loss: 0.1328 - val_acc: 1.0000\n", | |
"Epoch 66/200\n", | |
"Epoch 00065: val_loss improved from 0.13196 to 0.12495, saving model to best.model\n", | |
"0s - loss: 0.2253 - acc: 0.9252 - val_loss: 0.1250 - val_acc: 1.0000\n", | |
"Epoch 67/200\n", | |
"Epoch 00066: val_loss improved from 0.12495 to 0.12172, saving model to best.model\n", | |
"0s - loss: 0.2586 - acc: 0.8785 - val_loss: 0.1217 - val_acc: 1.0000\n", | |
"Epoch 68/200\n", | |
"Epoch 00067: val_loss improved from 0.12172 to 0.11465, saving model to best.model\n", | |
"0s - loss: 0.2197 - acc: 0.9346 - val_loss: 0.1147 - val_acc: 1.0000\n", | |
"Epoch 69/200\n", | |
"Epoch 00068: val_loss improved from 0.11465 to 0.11156, saving model to best.model\n", | |
"0s - loss: 0.3059 - acc: 0.8692 - val_loss: 0.1116 - val_acc: 1.0000\n", | |
"Epoch 70/200\n", | |
"Epoch 00069: val_loss improved from 0.11156 to 0.11017, saving model to best.model\n", | |
"0s - loss: 0.2502 - acc: 0.8972 - val_loss: 0.1102 - val_acc: 1.0000\n", | |
"Epoch 71/200\n", | |
"Epoch 00070: val_loss improved from 0.11017 to 0.10434, saving model to best.model\n", | |
"0s - loss: 0.2401 - acc: 0.9159 - val_loss: 0.1043 - val_acc: 1.0000\n", | |
"Epoch 72/200\n", | |
"Epoch 00071: val_loss improved from 0.10434 to 0.09865, saving model to best.model\n", | |
"0s - loss: 0.2543 - acc: 0.9159 - val_loss: 0.0986 - val_acc: 1.0000\n", | |
"Epoch 73/200\n", | |
"Epoch 00072: val_loss improved from 0.09865 to 0.09239, saving model to best.model\n", | |
"0s - loss: 0.2703 - acc: 0.8879 - val_loss: 0.0924 - val_acc: 1.0000\n", | |
"Epoch 74/200\n", | |
"Epoch 00073: val_loss improved from 0.09239 to 0.08409, saving model to best.model\n", | |
"0s - loss: 0.2504 - acc: 0.9065 - val_loss: 0.0841 - val_acc: 1.0000\n", | |
"Epoch 75/200\n", | |
"Epoch 00074: val_loss improved from 0.08409 to 0.07549, saving model to best.model\n", | |
"0s - loss: 0.2818 - acc: 0.9065 - val_loss: 0.0755 - val_acc: 1.0000\n", | |
"Epoch 76/200\n", | |
"Epoch 00075: val_loss improved from 0.07549 to 0.07058, saving model to best.model\n", | |
"0s - loss: 0.2371 - acc: 0.8879 - val_loss: 0.0706 - val_acc: 1.0000\n", | |
"Epoch 77/200\n", | |
"Epoch 00076: val_loss improved from 0.07058 to 0.06885, saving model to best.model\n", | |
"0s - loss: 0.2907 - acc: 0.8972 - val_loss: 0.0688 - val_acc: 1.0000\n", | |
"Epoch 78/200\n", | |
"Epoch 00077: val_loss improved from 0.06885 to 0.06772, saving model to best.model\n", | |
"0s - loss: 0.2724 - acc: 0.8879 - val_loss: 0.0677 - val_acc: 1.0000\n", | |
"Epoch 79/200\n", | |
"Epoch 00078: val_loss did not improve\n", | |
"0s - loss: 0.2170 - acc: 0.9159 - val_loss: 0.0680 - val_acc: 1.0000\n", | |
"Epoch 80/200\n", | |
"Epoch 00079: val_loss did not improve\n", | |
"0s - loss: 0.2379 - acc: 0.9065 - val_loss: 0.0720 - val_acc: 1.0000\n", | |
"Epoch 81/200\n", | |
"Epoch 00080: val_loss did not improve\n", | |
"0s - loss: 0.2435 - acc: 0.9159 - val_loss: 0.0773 - val_acc: 1.0000\n", | |
"Epoch 82/200\n", | |
"Epoch 00081: val_loss did not improve\n", | |
"0s - loss: 0.1929 - acc: 0.9439 - val_loss: 0.0896 - val_acc: 1.0000\n", | |
"Epoch 83/200\n", | |
"Epoch 00082: val_loss did not improve\n", | |
"0s - loss: 0.1729 - acc: 0.9439 - val_loss: 0.1000 - val_acc: 1.0000\n", | |
"Epoch 84/200\n", | |
"Epoch 00083: val_loss did not improve\n", | |
"0s - loss: 0.2282 - acc: 0.9252 - val_loss: 0.1083 - val_acc: 1.0000\n", | |
"Epoch 85/200\n", | |
"Epoch 00084: val_loss did not improve\n", | |
"0s - loss: 0.1820 - acc: 0.9626 - val_loss: 0.1119 - val_acc: 0.9167\n", | |
"Epoch 86/200\n", | |
"Epoch 00085: val_loss did not improve\n", | |
"0s - loss: 0.2165 - acc: 0.9252 - val_loss: 0.1083 - val_acc: 0.9167\n", | |
"Epoch 87/200\n", | |
"Epoch 00086: val_loss did not improve\n", | |
"0s - loss: 0.1825 - acc: 0.9159 - val_loss: 0.0941 - val_acc: 1.0000\n", | |
"Epoch 88/200\n", | |
"Epoch 00087: val_loss did not improve\n", | |
"0s - loss: 0.1884 - acc: 0.9346 - val_loss: 0.0769 - val_acc: 1.0000\n", | |
"Epoch 89/200\n", | |
"Epoch 00088: val_loss improved from 0.06772 to 0.06362, saving model to best.model\n", | |
"0s - loss: 0.2249 - acc: 0.8972 - val_loss: 0.0636 - val_acc: 1.0000\n", | |
"Epoch 90/200\n", | |
"Epoch 00089: val_loss improved from 0.06362 to 0.05359, saving model to best.model\n", | |
"0s - loss: 0.2536 - acc: 0.9159 - val_loss: 0.0536 - val_acc: 1.0000\n", | |
"Epoch 91/200\n", | |
"Epoch 00090: val_loss improved from 0.05359 to 0.04854, saving model to best.model\n", | |
"0s - loss: 0.1667 - acc: 0.9439 - val_loss: 0.0485 - val_acc: 1.0000\n", | |
"Epoch 92/200\n", | |
"Epoch 00091: val_loss improved from 0.04854 to 0.04507, saving model to best.model\n", | |
"0s - loss: 0.1502 - acc: 0.9626 - val_loss: 0.0451 - val_acc: 1.0000\n", | |
"Epoch 93/200\n", | |
"Epoch 00092: val_loss improved from 0.04507 to 0.04320, saving model to best.model\n", | |
"0s - loss: 0.2025 - acc: 0.9252 - val_loss: 0.0432 - val_acc: 1.0000\n", | |
"Epoch 94/200\n", | |
"Epoch 00093: val_loss improved from 0.04320 to 0.04223, saving model to best.model\n", | |
"0s - loss: 0.2215 - acc: 0.9439 - val_loss: 0.0422 - val_acc: 1.0000\n", | |
"Epoch 95/200\n", | |
"Epoch 00094: val_loss improved from 0.04223 to 0.04170, saving model to best.model\n", | |
"0s - loss: 0.1649 - acc: 0.9626 - val_loss: 0.0417 - val_acc: 1.0000\n", | |
"Epoch 96/200\n", | |
"Epoch 00095: val_loss did not improve\n", | |
"0s - loss: 0.1987 - acc: 0.9346 - val_loss: 0.0427 - val_acc: 1.0000\n", | |
"Epoch 97/200\n", | |
"Epoch 00096: val_loss did not improve\n", | |
"0s - loss: 0.1506 - acc: 0.9533 - val_loss: 0.0440 - val_acc: 1.0000\n", | |
"Epoch 98/200\n", | |
"Epoch 00097: val_loss did not improve\n", | |
"0s - loss: 0.1180 - acc: 0.9813 - val_loss: 0.0455 - val_acc: 1.0000\n", | |
"Epoch 99/200\n", | |
"Epoch 00098: val_loss did not improve\n", | |
"0s - loss: 0.1390 - acc: 0.9533 - val_loss: 0.0474 - val_acc: 1.0000\n", | |
"Epoch 100/200\n", | |
"Epoch 00099: val_loss did not improve\n", | |
"0s - loss: 0.2049 - acc: 0.9252 - val_loss: 0.0486 - val_acc: 1.0000\n", | |
"Epoch 101/200\n", | |
"Epoch 00100: val_loss did not improve\n", | |
"0s - loss: 0.1642 - acc: 0.9533 - val_loss: 0.0472 - val_acc: 1.0000\n", | |
"Epoch 102/200\n", | |
"Epoch 00101: val_loss did not improve\n", | |
"0s - loss: 0.1392 - acc: 0.9720 - val_loss: 0.0472 - val_acc: 1.0000\n", | |
"Epoch 103/200\n", | |
"Epoch 00102: val_loss did not improve\n", | |
"0s - loss: 0.1202 - acc: 0.9720 - val_loss: 0.0480 - val_acc: 1.0000\n", | |
"Epoch 104/200\n", | |
"Epoch 00103: val_loss did not improve\n", | |
"0s - loss: 0.2050 - acc: 0.9252 - val_loss: 0.0468 - val_acc: 1.0000\n", | |
"Epoch 105/200\n", | |
"Epoch 00104: val_loss did not improve\n", | |
"0s - loss: 0.0971 - acc: 0.9813 - val_loss: 0.0434 - val_acc: 1.0000\n", | |
"Epoch 106/200\n", | |
"Epoch 00105: val_loss improved from 0.04170 to 0.03921, saving model to best.model\n", | |
"0s - loss: 0.1529 - acc: 0.9346 - val_loss: 0.0392 - val_acc: 1.0000\n", | |
"Epoch 107/200\n", | |
"Epoch 00106: val_loss improved from 0.03921 to 0.03566, saving model to best.model\n", | |
"0s - loss: 0.1027 - acc: 0.9533 - val_loss: 0.0357 - val_acc: 1.0000\n", | |
"Epoch 108/200\n", | |
"Epoch 00107: val_loss improved from 0.03566 to 0.03281, saving model to best.model\n", | |
"0s - loss: 0.1447 - acc: 0.9439 - val_loss: 0.0328 - val_acc: 1.0000\n", | |
"Epoch 109/200\n", | |
"Epoch 00108: val_loss improved from 0.03281 to 0.03109, saving model to best.model\n", | |
"0s - loss: 0.1457 - acc: 0.9346 - val_loss: 0.0311 - val_acc: 1.0000\n", | |
"Epoch 110/200\n", | |
"Epoch 00109: val_loss improved from 0.03109 to 0.02957, saving model to best.model\n", | |
"0s - loss: 0.1729 - acc: 0.9252 - val_loss: 0.0296 - val_acc: 1.0000\n", | |
"Epoch 111/200\n", | |
"Epoch 00110: val_loss improved from 0.02957 to 0.02843, saving model to best.model\n", | |
"0s - loss: 0.0989 - acc: 0.9720 - val_loss: 0.0284 - val_acc: 1.0000\n", | |
"Epoch 112/200\n", | |
"Epoch 00111: val_loss improved from 0.02843 to 0.02798, saving model to best.model\n", | |
"0s - loss: 0.1171 - acc: 0.9346 - val_loss: 0.0280 - val_acc: 1.0000\n", | |
"Epoch 113/200\n", | |
"Epoch 00112: val_loss improved from 0.02798 to 0.02726, saving model to best.model\n", | |
"0s - loss: 0.1169 - acc: 0.9626 - val_loss: 0.0273 - val_acc: 1.0000\n", | |
"Epoch 114/200\n", | |
"Epoch 00113: val_loss improved from 0.02726 to 0.02677, saving model to best.model\n", | |
"0s - loss: 0.1031 - acc: 0.9720 - val_loss: 0.0268 - val_acc: 1.0000\n", | |
"Epoch 115/200\n", | |
"Epoch 00114: val_loss improved from 0.02677 to 0.02604, saving model to best.model\n", | |
"0s - loss: 0.0972 - acc: 0.9626 - val_loss: 0.0260 - val_acc: 1.0000\n", | |
"Epoch 116/200\n", | |
"Epoch 00115: val_loss improved from 0.02604 to 0.02558, saving model to best.model\n", | |
"0s - loss: 0.0898 - acc: 0.9813 - val_loss: 0.0256 - val_acc: 1.0000\n", | |
"Epoch 117/200\n", | |
"Epoch 00116: val_loss did not improve\n", | |
"0s - loss: 0.1294 - acc: 0.9439 - val_loss: 0.0261 - val_acc: 1.0000\n", | |
"Epoch 118/200\n", | |
"Epoch 00117: val_loss did not improve\n", | |
"0s - loss: 0.1387 - acc: 0.9533 - val_loss: 0.0270 - val_acc: 1.0000\n", | |
"Epoch 119/200\n", | |
"Epoch 00118: val_loss did not improve\n", | |
"0s - loss: 0.1020 - acc: 0.9533 - val_loss: 0.0267 - val_acc: 1.0000\n", | |
"Epoch 120/200\n", | |
"Epoch 00119: val_loss improved from 0.02558 to 0.02497, saving model to best.model\n", | |
"0s - loss: 0.0994 - acc: 0.9626 - val_loss: 0.0250 - val_acc: 1.0000\n", | |
"Epoch 121/200\n", | |
"Epoch 00120: val_loss improved from 0.02497 to 0.02257, saving model to best.model\n", | |
"0s - loss: 0.1556 - acc: 0.9533 - val_loss: 0.0226 - val_acc: 1.0000\n", | |
"Epoch 122/200\n", | |
"Epoch 00121: val_loss improved from 0.02257 to 0.02038, saving model to best.model\n", | |
"0s - loss: 0.1290 - acc: 0.9626 - val_loss: 0.0204 - val_acc: 1.0000\n", | |
"Epoch 123/200\n", | |
"Epoch 00122: val_loss improved from 0.02038 to 0.01872, saving model to best.model\n", | |
"0s - loss: 0.0778 - acc: 0.9626 - val_loss: 0.0187 - val_acc: 1.0000\n", | |
"Epoch 124/200\n", | |
"Epoch 00123: val_loss improved from 0.01872 to 0.01750, saving model to best.model\n", | |
"0s - loss: 0.1181 - acc: 0.9626 - val_loss: 0.0175 - val_acc: 1.0000\n", | |
"Epoch 125/200\n", | |
"Epoch 00124: val_loss improved from 0.01750 to 0.01687, saving model to best.model\n", | |
"0s - loss: 0.1015 - acc: 0.9626 - val_loss: 0.0169 - val_acc: 1.0000\n", | |
"Epoch 126/200\n", | |
"Epoch 00125: val_loss improved from 0.01687 to 0.01639, saving model to best.model\n", | |
"0s - loss: 0.1194 - acc: 0.9439 - val_loss: 0.0164 - val_acc: 1.0000\n", | |
"Epoch 127/200\n", | |
"Epoch 00126: val_loss improved from 0.01639 to 0.01604, saving model to best.model\n", | |
"0s - loss: 0.1115 - acc: 0.9720 - val_loss: 0.0160 - val_acc: 1.0000\n", | |
"Epoch 128/200\n", | |
"Epoch 00127: val_loss improved from 0.01604 to 0.01603, saving model to best.model\n", | |
"0s - loss: 0.1121 - acc: 0.9533 - val_loss: 0.0160 - val_acc: 1.0000\n", | |
"Epoch 129/200\n", | |
"Epoch 00128: val_loss did not improve\n", | |
"0s - loss: 0.1050 - acc: 0.9720 - val_loss: 0.0162 - val_acc: 1.0000\n", | |
"Epoch 130/200\n", | |
"Epoch 00129: val_loss did not improve\n", | |
"0s - loss: 0.0671 - acc: 0.9813 - val_loss: 0.0169 - val_acc: 1.0000\n", | |
"Epoch 131/200\n", | |
"Epoch 00130: val_loss did not improve\n", | |
"0s - loss: 0.1009 - acc: 0.9626 - val_loss: 0.0171 - val_acc: 1.0000\n", | |
"Epoch 132/200\n", | |
"Epoch 00131: val_loss did not improve\n", | |
"0s - loss: 0.0751 - acc: 0.9813 - val_loss: 0.0171 - val_acc: 1.0000\n", | |
"Epoch 133/200\n", | |
"Epoch 00132: val_loss did not improve\n", | |
"0s - loss: 0.0625 - acc: 0.9907 - val_loss: 0.0165 - val_acc: 1.0000\n", | |
"Epoch 134/200\n", | |
"Epoch 00133: val_loss did not improve\n", | |
"0s - loss: 0.1046 - acc: 0.9907 - val_loss: 0.0161 - val_acc: 1.0000\n", | |
"Epoch 135/200\n", | |
"Epoch 00134: val_loss improved from 0.01603 to 0.01546, saving model to best.model\n", | |
"0s - loss: 0.1119 - acc: 0.9533 - val_loss: 0.0155 - val_acc: 1.0000\n", | |
"Epoch 136/200\n", | |
"Epoch 00135: val_loss improved from 0.01546 to 0.01524, saving model to best.model\n", | |
"0s - loss: 0.0940 - acc: 0.9813 - val_loss: 0.0152 - val_acc: 1.0000\n", | |
"Epoch 137/200\n", | |
"Epoch 00136: val_loss did not improve\n", | |
"0s - loss: 0.0607 - acc: 0.9720 - val_loss: 0.0153 - val_acc: 1.0000\n", | |
"Epoch 138/200\n", | |
"Epoch 00137: val_loss improved from 0.01524 to 0.01467, saving model to best.model\n", | |
"0s - loss: 0.0823 - acc: 0.9626 - val_loss: 0.0147 - val_acc: 1.0000\n", | |
"Epoch 139/200\n", | |
"Epoch 00138: val_loss improved from 0.01467 to 0.01435, saving model to best.model\n", | |
"0s - loss: 0.0450 - acc: 1.0000 - val_loss: 0.0144 - val_acc: 1.0000\n", | |
"Epoch 140/200\n", | |
"Epoch 00139: val_loss improved from 0.01435 to 0.01397, saving model to best.model\n", | |
"0s - loss: 0.1103 - acc: 0.9533 - val_loss: 0.0140 - val_acc: 1.0000\n", | |
"Epoch 141/200\n", | |
"Epoch 00140: val_loss improved from 0.01397 to 0.01290, saving model to best.model\n", | |
"0s - loss: 0.0711 - acc: 0.9626 - val_loss: 0.0129 - val_acc: 1.0000\n", | |
"Epoch 142/200\n", | |
"Epoch 00141: val_loss improved from 0.01290 to 0.01241, saving model to best.model\n", | |
"0s - loss: 0.0654 - acc: 0.9813 - val_loss: 0.0124 - val_acc: 1.0000\n", | |
"Epoch 143/200\n", | |
"Epoch 00142: val_loss improved from 0.01241 to 0.01222, saving model to best.model\n", | |
"0s - loss: 0.0653 - acc: 0.9720 - val_loss: 0.0122 - val_acc: 1.0000\n", | |
"Epoch 144/200\n", | |
"Epoch 00143: val_loss improved from 0.01222 to 0.01178, saving model to best.model\n", | |
"0s - loss: 0.0634 - acc: 0.9720 - val_loss: 0.0118 - val_acc: 1.0000\n", | |
"Epoch 145/200\n", | |
"Epoch 00144: val_loss improved from 0.01178 to 0.01121, saving model to best.model\n", | |
"0s - loss: 0.0388 - acc: 1.0000 - val_loss: 0.0112 - val_acc: 1.0000\n", | |
"Epoch 146/200\n", | |
"Epoch 00145: val_loss improved from 0.01121 to 0.01051, saving model to best.model\n", | |
"0s - loss: 0.1109 - acc: 0.9626 - val_loss: 0.0105 - val_acc: 1.0000\n", | |
"Epoch 147/200\n", | |
"Epoch 00146: val_loss did not improve\n", | |
"0s - loss: 0.1140 - acc: 0.9626 - val_loss: 0.0105 - val_acc: 1.0000\n", | |
"Epoch 148/200\n", | |
"Epoch 00147: val_loss did not improve\n", | |
"0s - loss: 0.0613 - acc: 0.9720 - val_loss: 0.0106 - val_acc: 1.0000\n", | |
"Epoch 149/200\n", | |
"Epoch 00148: val_loss improved from 0.01051 to 0.01004, saving model to best.model\n", | |
"0s - loss: 0.1069 - acc: 0.9533 - val_loss: 0.0100 - val_acc: 1.0000\n", | |
"Epoch 150/200\n", | |
"Epoch 00149: val_loss improved from 0.01004 to 0.00970, saving model to best.model\n", | |
"0s - loss: 0.0345 - acc: 0.9907 - val_loss: 0.0097 - val_acc: 1.0000\n", | |
"Epoch 151/200\n", | |
"Epoch 00150: val_loss improved from 0.00970 to 0.00915, saving model to best.model\n", | |
"0s - loss: 0.0685 - acc: 0.9626 - val_loss: 0.0091 - val_acc: 1.0000\n", | |
"Epoch 152/200\n", | |
"Epoch 00151: val_loss improved from 0.00915 to 0.00887, saving model to best.model\n", | |
"0s - loss: 0.0788 - acc: 0.9626 - val_loss: 0.0089 - val_acc: 1.0000\n", | |
"Epoch 153/200\n", | |
"Epoch 00152: val_loss improved from 0.00887 to 0.00864, saving model to best.model\n", | |
"0s - loss: 0.0662 - acc: 0.9626 - val_loss: 0.0086 - val_acc: 1.0000\n", | |
"Epoch 154/200\n", | |
"Epoch 00153: val_loss did not improve\n", | |
"0s - loss: 0.1299 - acc: 0.9626 - val_loss: 0.0087 - val_acc: 1.0000\n", | |
"Epoch 155/200\n", | |
"Epoch 00154: val_loss did not improve\n", | |
"0s - loss: 0.0687 - acc: 0.9720 - val_loss: 0.0088 - val_acc: 1.0000\n", | |
"Epoch 156/200\n", | |
"Epoch 00155: val_loss did not improve\n", | |
"0s - loss: 0.0323 - acc: 0.9907 - val_loss: 0.0089 - val_acc: 1.0000\n", | |
"Epoch 157/200\n", | |
"Epoch 00156: val_loss did not improve\n", | |
"0s - loss: 0.0568 - acc: 0.9720 - val_loss: 0.0092 - val_acc: 1.0000\n", | |
"Epoch 158/200\n", | |
"Epoch 00157: val_loss did not improve\n", | |
"0s - loss: 0.0565 - acc: 0.9907 - val_loss: 0.0092 - val_acc: 1.0000\n", | |
"Epoch 159/200\n", | |
"Epoch 00158: val_loss did not improve\n", | |
"0s - loss: 0.0834 - acc: 0.9626 - val_loss: 0.0089 - val_acc: 1.0000\n", | |
"Epoch 160/200\n", | |
"Epoch 00159: val_loss improved from 0.00864 to 0.00856, saving model to best.model\n", | |
"0s - loss: 0.0712 - acc: 0.9813 - val_loss: 0.0086 - val_acc: 1.0000\n", | |
"Epoch 161/200\n", | |
"Epoch 00160: val_loss improved from 0.00856 to 0.00832, saving model to best.model\n", | |
"0s - loss: 0.0618 - acc: 0.9907 - val_loss: 0.0083 - val_acc: 1.0000\n", | |
"Epoch 162/200\n", | |
"Epoch 00161: val_loss improved from 0.00832 to 0.00831, saving model to best.model\n", | |
"0s - loss: 0.0513 - acc: 0.9813 - val_loss: 0.0083 - val_acc: 1.0000\n", | |
"Epoch 163/200\n", | |
"Epoch 00162: val_loss improved from 0.00831 to 0.00828, saving model to best.model\n", | |
"0s - loss: 0.0835 - acc: 0.9813 - val_loss: 0.0083 - val_acc: 1.0000\n", | |
"Epoch 164/200\n", | |
"Epoch 00163: val_loss improved from 0.00828 to 0.00798, saving model to best.model\n", | |
"0s - loss: 0.0787 - acc: 0.9813 - val_loss: 0.0080 - val_acc: 1.0000\n", | |
"Epoch 165/200\n", | |
"Epoch 00164: val_loss improved from 0.00798 to 0.00769, saving model to best.model\n", | |
"0s - loss: 0.0475 - acc: 0.9813 - val_loss: 0.0077 - val_acc: 1.0000\n", | |
"Epoch 166/200\n", | |
"Epoch 00165: val_loss improved from 0.00769 to 0.00747, saving model to best.model\n", | |
"0s - loss: 0.1388 - acc: 0.9533 - val_loss: 0.0075 - val_acc: 1.0000\n", | |
"Epoch 167/200\n", | |
"Epoch 00166: val_loss improved from 0.00747 to 0.00736, saving model to best.model\n", | |
"0s - loss: 0.0736 - acc: 0.9626 - val_loss: 0.0074 - val_acc: 1.0000\n", | |
"Epoch 168/200\n", | |
"Epoch 00167: val_loss improved from 0.00736 to 0.00731, saving model to best.model\n", | |
"0s - loss: 0.0676 - acc: 0.9813 - val_loss: 0.0073 - val_acc: 1.0000\n", | |
"Epoch 169/200\n", | |
"Epoch 00168: val_loss improved from 0.00731 to 0.00730, saving model to best.model\n", | |
"0s - loss: 0.0626 - acc: 0.9813 - val_loss: 0.0073 - val_acc: 1.0000\n", | |
"Epoch 170/200\n", | |
"Epoch 00169: val_loss did not improve\n", | |
"0s - loss: 0.0512 - acc: 0.9907 - val_loss: 0.0073 - val_acc: 1.0000\n", | |
"Epoch 171/200\n", | |
"Epoch 00170: val_loss improved from 0.00730 to 0.00726, saving model to best.model\n", | |
"0s - loss: 0.0294 - acc: 0.9907 - val_loss: 0.0073 - val_acc: 1.0000\n", | |
"Epoch 172/200\n", | |
"Epoch 00171: val_loss improved from 0.00726 to 0.00709, saving model to best.model\n", | |
"0s - loss: 0.0693 - acc: 0.9813 - val_loss: 0.0071 - val_acc: 1.0000\n", | |
"Epoch 173/200\n", | |
"Epoch 00172: val_loss improved from 0.00709 to 0.00706, saving model to best.model\n", | |
"0s - loss: 0.0388 - acc: 0.9907 - val_loss: 0.0071 - val_acc: 1.0000\n", | |
"Epoch 174/200\n", | |
"Epoch 00173: val_loss improved from 0.00706 to 0.00686, saving model to best.model\n", | |
"0s - loss: 0.0676 - acc: 0.9907 - val_loss: 0.0069 - val_acc: 1.0000\n", | |
"Epoch 175/200\n", | |
"Epoch 00174: val_loss improved from 0.00686 to 0.00657, saving model to best.model\n", | |
"0s - loss: 0.0576 - acc: 0.9720 - val_loss: 0.0066 - val_acc: 1.0000\n", | |
"Epoch 176/200\n", | |
"Epoch 00175: val_loss improved from 0.00657 to 0.00647, saving model to best.model\n", | |
"0s - loss: 0.0536 - acc: 0.9813 - val_loss: 0.0065 - val_acc: 1.0000\n", | |
"Epoch 177/200\n", | |
"Epoch 00176: val_loss improved from 0.00647 to 0.00639, saving model to best.model\n", | |
"0s - loss: 0.0338 - acc: 0.9907 - val_loss: 0.0064 - val_acc: 1.0000\n", | |
"Epoch 178/200\n", | |
"Epoch 00177: val_loss did not improve\n", | |
"0s - loss: 0.0551 - acc: 0.9907 - val_loss: 0.0064 - val_acc: 1.0000\n", | |
"Epoch 179/200\n", | |
"Epoch 00178: val_loss improved from 0.00639 to 0.00637, saving model to best.model\n", | |
"0s - loss: 0.0248 - acc: 1.0000 - val_loss: 0.0064 - val_acc: 1.0000\n", | |
"Epoch 180/200\n", | |
"Epoch 00179: val_loss did not improve\n", | |
"0s - loss: 0.0514 - acc: 0.9813 - val_loss: 0.0065 - val_acc: 1.0000\n", | |
"Epoch 181/200\n", | |
"Epoch 00180: val_loss did not improve\n", | |
"0s - loss: 0.0704 - acc: 0.9533 - val_loss: 0.0066 - val_acc: 1.0000\n", | |
"Epoch 182/200\n", | |
"Epoch 00181: val_loss did not improve\n", | |
"0s - loss: 0.0535 - acc: 0.9813 - val_loss: 0.0065 - val_acc: 1.0000\n", | |
"Epoch 183/200\n", | |
"Epoch 00182: val_loss did not improve\n", | |
"0s - loss: 0.0765 - acc: 0.9720 - val_loss: 0.0066 - val_acc: 1.0000\n", | |
"Epoch 184/200\n", | |
"Epoch 00183: val_loss improved from 0.00637 to 0.00636, saving model to best.model\n", | |
"0s - loss: 0.0557 - acc: 0.9907 - val_loss: 0.0064 - val_acc: 1.0000\n", | |
"Epoch 185/200\n", | |
"Epoch 00184: val_loss improved from 0.00636 to 0.00614, saving model to best.model\n", | |
"0s - loss: 0.0330 - acc: 0.9813 - val_loss: 0.0061 - val_acc: 1.0000\n", | |
"Epoch 186/200\n", | |
"Epoch 00185: val_loss improved from 0.00614 to 0.00589, saving model to best.model\n", | |
"0s - loss: 0.0454 - acc: 0.9813 - val_loss: 0.0059 - val_acc: 1.0000\n", | |
"Epoch 187/200\n", | |
"Epoch 00186: val_loss improved from 0.00589 to 0.00557, saving model to best.model\n", | |
"0s - loss: 0.0548 - acc: 0.9813 - val_loss: 0.0056 - val_acc: 1.0000\n", | |
"Epoch 188/200\n", | |
"Epoch 00187: val_loss improved from 0.00557 to 0.00545, saving model to best.model\n", | |
"0s - loss: 0.0425 - acc: 0.9907 - val_loss: 0.0055 - val_acc: 1.0000\n", | |
"Epoch 189/200\n", | |
"Epoch 00188: val_loss improved from 0.00545 to 0.00534, saving model to best.model\n", | |
"0s - loss: 0.0258 - acc: 1.0000 - val_loss: 0.0053 - val_acc: 1.0000\n", | |
"Epoch 190/200\n", | |
"Epoch 00189: val_loss improved from 0.00534 to 0.00527, saving model to best.model\n", | |
"0s - loss: 0.0493 - acc: 0.9813 - val_loss: 0.0053 - val_acc: 1.0000\n", | |
"Epoch 191/200\n", | |
"Epoch 00190: val_loss improved from 0.00527 to 0.00515, saving model to best.model\n", | |
"0s - loss: 0.0452 - acc: 0.9813 - val_loss: 0.0052 - val_acc: 1.0000\n", | |
"Epoch 192/200\n", | |
"Epoch 00191: val_loss improved from 0.00515 to 0.00503, saving model to best.model\n", | |
"0s - loss: 0.0737 - acc: 0.9813 - val_loss: 0.0050 - val_acc: 1.0000\n", | |
"Epoch 193/200\n", | |
"Epoch 00192: val_loss improved from 0.00503 to 0.00492, saving model to best.model\n", | |
"0s - loss: 0.0372 - acc: 0.9907 - val_loss: 0.0049 - val_acc: 1.0000\n", | |
"Epoch 194/200\n", | |
"Epoch 00193: val_loss improved from 0.00492 to 0.00482, saving model to best.model\n", | |
"0s - loss: 0.0480 - acc: 0.9813 - val_loss: 0.0048 - val_acc: 1.0000\n", | |
"Epoch 195/200\n", | |
"Epoch 00194: val_loss improved from 0.00482 to 0.00479, saving model to best.model\n", | |
"0s - loss: 0.0448 - acc: 0.9813 - val_loss: 0.0048 - val_acc: 1.0000\n", | |
"Epoch 196/200\n", | |
"Epoch 00195: val_loss improved from 0.00479 to 0.00467, saving model to best.model\n", | |
"0s - loss: 0.0416 - acc: 0.9813 - val_loss: 0.0047 - val_acc: 1.0000\n", | |
"Epoch 197/200\n", | |
"Epoch 00196: val_loss improved from 0.00467 to 0.00441, saving model to best.model\n", | |
"0s - loss: 0.0892 - acc: 0.9813 - val_loss: 0.0044 - val_acc: 1.0000\n", | |
"Epoch 198/200\n", | |
"Epoch 00197: val_loss improved from 0.00441 to 0.00418, saving model to best.model\n", | |
"0s - loss: 0.0346 - acc: 0.9907 - val_loss: 0.0042 - val_acc: 1.0000\n", | |
"Epoch 199/200\n", | |
"Epoch 00198: val_loss improved from 0.00418 to 0.00404, saving model to best.model\n", | |
"0s - loss: 0.0553 - acc: 0.9907 - val_loss: 0.0040 - val_acc: 1.0000\n", | |
"Epoch 200/200\n", | |
"Epoch 00199: val_loss improved from 0.00404 to 0.00392, saving model to best.model\n", | |
"0s - loss: 0.0732 - acc: 0.9813 - val_loss: 0.0039 - val_acc: 1.0000\n", | |
"Three layer deep neural net\n" | |
] | |
}, | |
{ | |
"data": { | |
"text/html": [ | |
"<div>\n", | |
"<style>\n", | |
" .dataframe thead tr:only-child th {\n", | |
" text-align: right;\n", | |
" }\n", | |
"\n", | |
" .dataframe thead th {\n", | |
" text-align: left;\n", | |
" }\n", | |
"\n", | |
" .dataframe tbody tr th {\n", | |
" vertical-align: top;\n", | |
" }\n", | |
"</style>\n", | |
"<table border=\"1\" class=\"dataframe\">\n", | |
" <thead>\n", | |
" <tr style=\"text-align: right;\">\n", | |
" <th>Predicted</th>\n", | |
" <th>0</th>\n", | |
" <th>1</th>\n", | |
" <th>2</th>\n", | |
" <th>All</th>\n", | |
" </tr>\n", | |
" <tr>\n", | |
" <th>Actual</th>\n", | |
" <th></th>\n", | |
" <th></th>\n", | |
" <th></th>\n", | |
" <th></th>\n", | |
" </tr>\n", | |
" </thead>\n", | |
" <tbody>\n", | |
" <tr>\n", | |
" <th>0</th>\n", | |
" <td>19</td>\n", | |
" <td>0</td>\n", | |
" <td>0</td>\n", | |
" <td>19</td>\n", | |
" </tr>\n", | |
" <tr>\n", | |
" <th>1</th>\n", | |
" <td>0</td>\n", | |
" <td>23</td>\n", | |
" <td>0</td>\n", | |
" <td>23</td>\n", | |
" </tr>\n", | |
" <tr>\n", | |
" <th>2</th>\n", | |
" <td>0</td>\n", | |
" <td>0</td>\n", | |
" <td>17</td>\n", | |
" <td>17</td>\n", | |
" </tr>\n", | |
" <tr>\n", | |
" <th>All</th>\n", | |
" <td>19</td>\n", | |
" <td>23</td>\n", | |
" <td>17</td>\n", | |
" <td>59</td>\n", | |
" </tr>\n", | |
" </tbody>\n", | |
"</table>\n", | |
"</div>" | |
], | |
"text/plain": [ | |
"Predicted 0 1 2 All\n", | |
"Actual \n", | |
"0 19 0 0 19\n", | |
"1 0 23 0 23\n", | |
"2 0 0 17 17\n", | |
"All 19 23 17 59" | |
] | |
}, | |
"metadata": {}, | |
"output_type": "display_data" | |
}, | |
{ | |
"name": "stdout", | |
"output_type": "stream", | |
"text": [ | |
"Accuracy: 1.000\n", | |
"Boostrapped accuracy 95 % interval 1.0 1.0\n", | |
"\n", | |
"Xgboost\n" | |
] | |
}, | |
{ | |
"data": { | |
"text/html": [ | |
"<div>\n", | |
"<style>\n", | |
" .dataframe thead tr:only-child th {\n", | |
" text-align: right;\n", | |
" }\n", | |
"\n", | |
" .dataframe thead th {\n", | |
" text-align: left;\n", | |
" }\n", | |
"\n", | |
" .dataframe tbody tr th {\n", | |
" vertical-align: top;\n", | |
" }\n", | |
"</style>\n", | |
"<table border=\"1\" class=\"dataframe\">\n", | |
" <thead>\n", | |
" <tr style=\"text-align: right;\">\n", | |
" <th>Predicted</th>\n", | |
" <th>0</th>\n", | |
" <th>1</th>\n", | |
" <th>2</th>\n", | |
" <th>All</th>\n", | |
" </tr>\n", | |
" <tr>\n", | |
" <th>Actual</th>\n", | |
" <th></th>\n", | |
" <th></th>\n", | |
" <th></th>\n", | |
" <th></th>\n", | |
" </tr>\n", | |
" </thead>\n", | |
" <tbody>\n", | |
" <tr>\n", | |
" <th>0</th>\n", | |
" <td>18</td>\n", | |
" <td>1</td>\n", | |
" <td>0</td>\n", | |
" <td>19</td>\n", | |
" </tr>\n", | |
" <tr>\n", | |
" <th>1</th>\n", | |
" <td>0</td>\n", | |
" <td>23</td>\n", | |
" <td>0</td>\n", | |
" <td>23</td>\n", | |
" </tr>\n", | |
" <tr>\n", | |
" <th>2</th>\n", | |
" <td>0</td>\n", | |
" <td>0</td>\n", | |
" <td>17</td>\n", | |
" <td>17</td>\n", | |
" </tr>\n", | |
" <tr>\n", | |
" <th>All</th>\n", | |
" <td>18</td>\n", | |
" <td>24</td>\n", | |
" <td>17</td>\n", | |
" <td>59</td>\n", | |
" </tr>\n", | |
" </tbody>\n", | |
"</table>\n", | |
"</div>" | |
], | |
"text/plain": [ | |
"Predicted 0 1 2 All\n", | |
"Actual \n", | |
"0 18 1 0 19\n", | |
"1 0 23 0 23\n", | |
"2 0 0 17 17\n", | |
"All 18 24 17 59" | |
] | |
}, | |
"metadata": {}, | |
"output_type": "display_data" | |
}, | |
{ | |
"name": "stdout", | |
"output_type": "stream", | |
"text": [ | |
"Accuracy: 0.983\n", | |
"Boostrapped accuracy 95 % interval 0.931034482759 - 1.0\n" | |
] | |
} | |
], | |
"source": [ | |
"compare_on_dataset(\n", | |
" 'https://gist.githubusercontent.com/tijptjik/9408623/raw/b237fa5848349a14a14e5d4107dc7897c21951f5/wine.csv',\n", | |
" target_variable='wine',\n", | |
" lr=0.001\n", | |
")" | |
] | |
}, | |
{ | |
"cell_type": "markdown", | |
"metadata": {}, | |
"source": [ | |
"## German Credit Data (n=1000)" | |
] | |
}, | |
{ | |
"cell_type": "code", | |
"execution_count": 16, | |
"metadata": { | |
"collapsed": false | |
}, | |
"outputs": [ | |
{ | |
"name": "stdout", | |
"output_type": "stream", | |
"text": [ | |
"Train on 603 samples, validate on 67 samples\n", | |
"Epoch 1/200\n", | |
"Epoch 00000: val_loss improved from inf to 0.63623, saving model to best.model\n", | |
"0s - loss: 0.6892 - acc: 0.5390 - val_loss: 0.6362 - val_acc: 0.6716\n", | |
"Epoch 2/200\n", | |
"Epoch 00001: val_loss improved from 0.63623 to 0.60586, saving model to best.model\n", | |
"0s - loss: 0.6346 - acc: 0.6517 - val_loss: 0.6059 - val_acc: 0.6716\n", | |
"Epoch 3/200\n", | |
"Epoch 00002: val_loss improved from 0.60586 to 0.59736, saving model to best.model\n", | |
"0s - loss: 0.6304 - acc: 0.6799 - val_loss: 0.5974 - val_acc: 0.6716\n", | |
"Epoch 4/200\n", | |
"Epoch 00003: val_loss improved from 0.59736 to 0.59252, saving model to best.model\n", | |
"0s - loss: 0.6352 - acc: 0.6833 - val_loss: 0.5925 - val_acc: 0.6716\n", | |
"Epoch 5/200\n", | |
"Epoch 00004: val_loss improved from 0.59252 to 0.58791, saving model to best.model\n", | |
"0s - loss: 0.6156 - acc: 0.6866 - val_loss: 0.5879 - val_acc: 0.6716\n", | |
"Epoch 6/200\n", | |
"Epoch 00005: val_loss improved from 0.58791 to 0.58424, saving model to best.model\n", | |
"0s - loss: 0.6011 - acc: 0.6866 - val_loss: 0.5842 - val_acc: 0.6716\n", | |
"Epoch 7/200\n", | |
"Epoch 00006: val_loss improved from 0.58424 to 0.58239, saving model to best.model\n", | |
"0s - loss: 0.6115 - acc: 0.6849 - val_loss: 0.5824 - val_acc: 0.6716\n", | |
"Epoch 8/200\n", | |
"Epoch 00007: val_loss improved from 0.58239 to 0.58225, saving model to best.model\n", | |
"0s - loss: 0.6013 - acc: 0.6866 - val_loss: 0.5822 - val_acc: 0.6716\n", | |
"Epoch 9/200\n", | |
"Epoch 00008: val_loss improved from 0.58225 to 0.58025, saving model to best.model\n", | |
"0s - loss: 0.6028 - acc: 0.6866 - val_loss: 0.5802 - val_acc: 0.6716\n", | |
"Epoch 10/200\n", | |
"Epoch 00009: val_loss improved from 0.58025 to 0.57538, saving model to best.model\n", | |
"0s - loss: 0.5975 - acc: 0.6866 - val_loss: 0.5754 - val_acc: 0.6716\n", | |
"Epoch 11/200\n", | |
"Epoch 00010: val_loss improved from 0.57538 to 0.56945, saving model to best.model\n", | |
"0s - loss: 0.5967 - acc: 0.6882 - val_loss: 0.5694 - val_acc: 0.6716\n", | |
"Epoch 12/200\n", | |
"Epoch 00011: val_loss improved from 0.56945 to 0.56285, saving model to best.model\n", | |
"0s - loss: 0.5797 - acc: 0.6849 - val_loss: 0.5628 - val_acc: 0.6716\n", | |
"Epoch 13/200\n", | |
"Epoch 00012: val_loss improved from 0.56285 to 0.55478, saving model to best.model\n", | |
"0s - loss: 0.5745 - acc: 0.6833 - val_loss: 0.5548 - val_acc: 0.6716\n", | |
"Epoch 14/200\n", | |
"Epoch 00013: val_loss improved from 0.55478 to 0.54548, saving model to best.model\n", | |
"0s - loss: 0.5722 - acc: 0.6866 - val_loss: 0.5455 - val_acc: 0.6716\n", | |
"Epoch 15/200\n", | |
"Epoch 00014: val_loss improved from 0.54548 to 0.53926, saving model to best.model\n", | |
"0s - loss: 0.5837 - acc: 0.6833 - val_loss: 0.5393 - val_acc: 0.6716\n", | |
"Epoch 16/200\n", | |
"Epoch 00015: val_loss improved from 0.53926 to 0.53601, saving model to best.model\n", | |
"0s - loss: 0.5579 - acc: 0.6949 - val_loss: 0.5360 - val_acc: 0.6716\n", | |
"Epoch 17/200\n", | |
"Epoch 00016: val_loss improved from 0.53601 to 0.53322, saving model to best.model\n", | |
"0s - loss: 0.5764 - acc: 0.6965 - val_loss: 0.5332 - val_acc: 0.7015\n", | |
"Epoch 18/200\n", | |
"Epoch 00017: val_loss improved from 0.53322 to 0.52674, saving model to best.model\n", | |
"0s - loss: 0.5645 - acc: 0.6915 - val_loss: 0.5267 - val_acc: 0.7164\n", | |
"Epoch 19/200\n", | |
"Epoch 00018: val_loss improved from 0.52674 to 0.51517, saving model to best.model\n", | |
"0s - loss: 0.5499 - acc: 0.7015 - val_loss: 0.5152 - val_acc: 0.7164\n", | |
"Epoch 20/200\n", | |
"Epoch 00019: val_loss improved from 0.51517 to 0.50623, saving model to best.model\n", | |
"0s - loss: 0.5648 - acc: 0.7015 - val_loss: 0.5062 - val_acc: 0.7612\n", | |
"Epoch 21/200\n", | |
"Epoch 00020: val_loss improved from 0.50623 to 0.50254, saving model to best.model\n", | |
"0s - loss: 0.5519 - acc: 0.7032 - val_loss: 0.5025 - val_acc: 0.8060\n", | |
"Epoch 22/200\n", | |
"Epoch 00021: val_loss improved from 0.50254 to 0.49712, saving model to best.model\n", | |
"0s - loss: 0.5467 - acc: 0.7032 - val_loss: 0.4971 - val_acc: 0.7910\n", | |
"Epoch 23/200\n", | |
"Epoch 00022: val_loss improved from 0.49712 to 0.48858, saving model to best.model\n", | |
"0s - loss: 0.5653 - acc: 0.7164 - val_loss: 0.4886 - val_acc: 0.7910\n", | |
"Epoch 24/200\n", | |
"Epoch 00023: val_loss improved from 0.48858 to 0.48443, saving model to best.model\n", | |
"0s - loss: 0.5328 - acc: 0.7247 - val_loss: 0.4844 - val_acc: 0.7761\n", | |
"Epoch 25/200\n", | |
"Epoch 00024: val_loss improved from 0.48443 to 0.48158, saving model to best.model\n", | |
"0s - loss: 0.5436 - acc: 0.6932 - val_loss: 0.4816 - val_acc: 0.7910\n", | |
"Epoch 26/200\n", | |
"Epoch 00025: val_loss improved from 0.48158 to 0.47740, saving model to best.model\n", | |
"0s - loss: 0.5315 - acc: 0.7264 - val_loss: 0.4774 - val_acc: 0.8060\n", | |
"Epoch 27/200\n", | |
"Epoch 00026: val_loss improved from 0.47740 to 0.47289, saving model to best.model\n", | |
"0s - loss: 0.5282 - acc: 0.7330 - val_loss: 0.4729 - val_acc: 0.8358\n", | |
"Epoch 28/200\n", | |
"Epoch 00027: val_loss improved from 0.47289 to 0.47229, saving model to best.model\n", | |
"0s - loss: 0.5471 - acc: 0.7181 - val_loss: 0.4723 - val_acc: 0.8358\n", | |
"Epoch 29/200\n", | |
"Epoch 00028: val_loss improved from 0.47229 to 0.47107, saving model to best.model\n", | |
"0s - loss: 0.5352 - acc: 0.7347 - val_loss: 0.4711 - val_acc: 0.8209\n", | |
"Epoch 30/200\n", | |
"Epoch 00029: val_loss improved from 0.47107 to 0.46866, saving model to best.model\n", | |
"0s - loss: 0.5331 - acc: 0.7413 - val_loss: 0.4687 - val_acc: 0.8209\n", | |
"Epoch 31/200\n", | |
"Epoch 00030: val_loss improved from 0.46866 to 0.46637, saving model to best.model\n", | |
"0s - loss: 0.5511 - acc: 0.7297 - val_loss: 0.4664 - val_acc: 0.8358\n", | |
"Epoch 32/200\n", | |
"Epoch 00031: val_loss improved from 0.46637 to 0.46249, saving model to best.model\n", | |
"0s - loss: 0.5391 - acc: 0.7264 - val_loss: 0.4625 - val_acc: 0.8209\n", | |
"Epoch 33/200\n", | |
"Epoch 00032: val_loss improved from 0.46249 to 0.45833, saving model to best.model\n", | |
"0s - loss: 0.5407 - acc: 0.7313 - val_loss: 0.4583 - val_acc: 0.8209\n", | |
"Epoch 34/200\n", | |
"Epoch 00033: val_loss improved from 0.45833 to 0.45635, saving model to best.model\n", | |
"0s - loss: 0.5371 - acc: 0.7347 - val_loss: 0.4563 - val_acc: 0.8060\n", | |
"Epoch 35/200\n", | |
"Epoch 00034: val_loss improved from 0.45635 to 0.45539, saving model to best.model\n", | |
"0s - loss: 0.5345 - acc: 0.7463 - val_loss: 0.4554 - val_acc: 0.8209\n", | |
"Epoch 36/200\n", | |
"Epoch 00035: val_loss did not improve\n", | |
"0s - loss: 0.5323 - acc: 0.7446 - val_loss: 0.4570 - val_acc: 0.8060\n", | |
"Epoch 37/200\n", | |
"Epoch 00036: val_loss did not improve\n", | |
"0s - loss: 0.5295 - acc: 0.7430 - val_loss: 0.4602 - val_acc: 0.8209\n", | |
"Epoch 38/200\n", | |
"Epoch 00037: val_loss did not improve\n", | |
"0s - loss: 0.5429 - acc: 0.7463 - val_loss: 0.4625 - val_acc: 0.8358\n", | |
"Epoch 39/200\n", | |
"Epoch 00038: val_loss did not improve\n", | |
"0s - loss: 0.5251 - acc: 0.7347 - val_loss: 0.4619 - val_acc: 0.8358\n", | |
"Epoch 40/200\n", | |
"Epoch 00039: val_loss did not improve\n", | |
"0s - loss: 0.5202 - acc: 0.7396 - val_loss: 0.4570 - val_acc: 0.8060\n", | |
"Epoch 41/200\n", | |
"Epoch 00040: val_loss improved from 0.45539 to 0.44971, saving model to best.model\n", | |
"0s - loss: 0.5205 - acc: 0.7463 - val_loss: 0.4497 - val_acc: 0.8060\n", | |
"Epoch 42/200\n", | |
"Epoch 00041: val_loss improved from 0.44971 to 0.44405, saving model to best.model\n", | |
"0s - loss: 0.5125 - acc: 0.7446 - val_loss: 0.4440 - val_acc: 0.8060\n", | |
"Epoch 43/200\n", | |
"Epoch 00042: val_loss improved from 0.44405 to 0.44256, saving model to best.model\n", | |
"0s - loss: 0.5237 - acc: 0.7512 - val_loss: 0.4426 - val_acc: 0.8060\n", | |
"Epoch 44/200\n", | |
"Epoch 00043: val_loss improved from 0.44256 to 0.43950, saving model to best.model\n", | |
"0s - loss: 0.5266 - acc: 0.7579 - val_loss: 0.4395 - val_acc: 0.8060\n", | |
"Epoch 45/200\n", | |
"Epoch 00044: val_loss improved from 0.43950 to 0.43696, saving model to best.model\n", | |
"0s - loss: 0.5075 - acc: 0.7529 - val_loss: 0.4370 - val_acc: 0.8209\n", | |
"Epoch 46/200\n", | |
"Epoch 00045: val_loss improved from 0.43696 to 0.43560, saving model to best.model\n", | |
"0s - loss: 0.5221 - acc: 0.7496 - val_loss: 0.4356 - val_acc: 0.8358\n", | |
"Epoch 47/200\n", | |
"Epoch 00046: val_loss improved from 0.43560 to 0.43394, saving model to best.model\n", | |
"0s - loss: 0.5093 - acc: 0.7496 - val_loss: 0.4339 - val_acc: 0.8358\n", | |
"Epoch 48/200\n", | |
"Epoch 00047: val_loss improved from 0.43394 to 0.43271, saving model to best.model\n", | |
"0s - loss: 0.5212 - acc: 0.7430 - val_loss: 0.4327 - val_acc: 0.8358\n", | |
"Epoch 49/200\n", | |
"Epoch 00048: val_loss did not improve\n", | |
"0s - loss: 0.5264 - acc: 0.7562 - val_loss: 0.4330 - val_acc: 0.8507\n", | |
"Epoch 50/200\n", | |
"Epoch 00049: val_loss did not improve\n", | |
"0s - loss: 0.5206 - acc: 0.7396 - val_loss: 0.4330 - val_acc: 0.8358\n", | |
"Epoch 51/200\n", | |
"Epoch 00050: val_loss improved from 0.43271 to 0.43132, saving model to best.model\n", | |
"0s - loss: 0.5230 - acc: 0.7413 - val_loss: 0.4313 - val_acc: 0.8507\n", | |
"Epoch 52/200\n", | |
"Epoch 00051: val_loss did not improve\n", | |
"0s - loss: 0.5128 - acc: 0.7446 - val_loss: 0.4316 - val_acc: 0.8209\n", | |
"Epoch 53/200\n", | |
"Epoch 00052: val_loss did not improve\n", | |
"0s - loss: 0.5108 - acc: 0.7529 - val_loss: 0.4327 - val_acc: 0.8060\n", | |
"Epoch 54/200\n", | |
"Epoch 00053: val_loss did not improve\n", | |
"0s - loss: 0.4953 - acc: 0.7761 - val_loss: 0.4373 - val_acc: 0.8358\n", | |
"Epoch 55/200\n", | |
"Epoch 00054: val_loss did not improve\n", | |
"0s - loss: 0.5108 - acc: 0.7562 - val_loss: 0.4388 - val_acc: 0.8507\n", | |
"Epoch 56/200\n", | |
"Epoch 00055: val_loss did not improve\n", | |
"0s - loss: 0.5131 - acc: 0.7645 - val_loss: 0.4367 - val_acc: 0.8507\n", | |
"Epoch 57/200\n", | |
"Epoch 00056: val_loss improved from 0.43132 to 0.42840, saving model to best.model\n", | |
"0s - loss: 0.5039 - acc: 0.7512 - val_loss: 0.4284 - val_acc: 0.8358\n", | |
"Epoch 58/200\n", | |
"Epoch 00057: val_loss improved from 0.42840 to 0.42236, saving model to best.model\n", | |
"0s - loss: 0.5043 - acc: 0.7512 - val_loss: 0.4224 - val_acc: 0.8358\n", | |
"Epoch 59/200\n", | |
"Epoch 00058: val_loss improved from 0.42236 to 0.41835, saving model to best.model\n", | |
"0s - loss: 0.4985 - acc: 0.7728 - val_loss: 0.4183 - val_acc: 0.8358\n", | |
"Epoch 60/200\n", | |
"Epoch 00059: val_loss improved from 0.41835 to 0.41351, saving model to best.model\n", | |
"0s - loss: 0.4980 - acc: 0.7629 - val_loss: 0.4135 - val_acc: 0.8358\n", | |
"Epoch 61/200\n", | |
"Epoch 00060: val_loss improved from 0.41351 to 0.41146, saving model to best.model\n", | |
"0s - loss: 0.4996 - acc: 0.7463 - val_loss: 0.4115 - val_acc: 0.8358\n", | |
"Epoch 62/200\n", | |
"Epoch 00061: val_loss improved from 0.41146 to 0.41029, saving model to best.model\n", | |
"0s - loss: 0.4929 - acc: 0.7745 - val_loss: 0.4103 - val_acc: 0.8358\n", | |
"Epoch 63/200\n", | |
"Epoch 00062: val_loss did not improve\n", | |
"0s - loss: 0.4964 - acc: 0.7579 - val_loss: 0.4110 - val_acc: 0.8358\n", | |
"Epoch 64/200\n", | |
"Epoch 00063: val_loss did not improve\n", | |
"0s - loss: 0.4795 - acc: 0.7612 - val_loss: 0.4119 - val_acc: 0.8358\n", | |
"Epoch 65/200\n", | |
"Epoch 00064: val_loss did not improve\n", | |
"0s - loss: 0.4963 - acc: 0.7645 - val_loss: 0.4146 - val_acc: 0.8358\n", | |
"Epoch 66/200\n", | |
"Epoch 00065: val_loss did not improve\n", | |
"0s - loss: 0.4960 - acc: 0.7479 - val_loss: 0.4177 - val_acc: 0.8358\n", | |
"Epoch 67/200\n", | |
"Epoch 00066: val_loss did not improve\n", | |
"0s - loss: 0.5035 - acc: 0.7363 - val_loss: 0.4210 - val_acc: 0.8209\n", | |
"Epoch 68/200\n", | |
"Epoch 00067: val_loss did not improve\n", | |
"0s - loss: 0.4921 - acc: 0.7595 - val_loss: 0.4209 - val_acc: 0.8358\n", | |
"Three layer deep neural net\n" | |
] | |
}, | |
{ | |
"data": { | |
"text/html": [ | |
"<div>\n", | |
"<style>\n", | |
" .dataframe thead tr:only-child th {\n", | |
" text-align: right;\n", | |
" }\n", | |
"\n", | |
" .dataframe thead th {\n", | |
" text-align: left;\n", | |
" }\n", | |
"\n", | |
" .dataframe tbody tr th {\n", | |
" vertical-align: top;\n", | |
" }\n", | |
"</style>\n", | |
"<table border=\"1\" class=\"dataframe\">\n", | |
" <thead>\n", | |
" <tr style=\"text-align: right;\">\n", | |
" <th>Predicted</th>\n", | |
" <th>0</th>\n", | |
" <th>1</th>\n", | |
" <th>All</th>\n", | |
" </tr>\n", | |
" <tr>\n", | |
" <th>Actual</th>\n", | |
" <th></th>\n", | |
" <th></th>\n", | |
" <th></th>\n", | |
" </tr>\n", | |
" </thead>\n", | |
" <tbody>\n", | |
" <tr>\n", | |
" <th>0</th>\n", | |
" <td>48</td>\n", | |
" <td>40</td>\n", | |
" <td>88</td>\n", | |
" </tr>\n", | |
" <tr>\n", | |
" <th>1</th>\n", | |
" <td>32</td>\n", | |
" <td>210</td>\n", | |
" <td>242</td>\n", | |
" </tr>\n", | |
" <tr>\n", | |
" <th>All</th>\n", | |
" <td>80</td>\n", | |
" <td>250</td>\n", | |
" <td>330</td>\n", | |
" </tr>\n", | |
" </tbody>\n", | |
"</table>\n", | |
"</div>" | |
], | |
"text/plain": [ | |
"Predicted 0 1 All\n", | |
"Actual \n", | |
"0 48 40 88\n", | |
"1 32 210 242\n", | |
"All 80 250 330" | |
] | |
}, | |
"metadata": {}, | |
"output_type": "display_data" | |
}, | |
{ | |
"name": "stdout", | |
"output_type": "stream", | |
"text": [ | |
"Accuracy: 0.782\n", | |
"Boostrapped accuracy 95 % interval 0.727272727273 0.836363636364\n", | |
"\n", | |
"Xgboost\n" | |
] | |
}, | |
{ | |
"data": { | |
"text/html": [ | |
"<div>\n", | |
"<style>\n", | |
" .dataframe thead tr:only-child th {\n", | |
" text-align: right;\n", | |
" }\n", | |
"\n", | |
" .dataframe thead th {\n", | |
" text-align: left;\n", | |
" }\n", | |
"\n", | |
" .dataframe tbody tr th {\n", | |
" vertical-align: top;\n", | |
" }\n", | |
"</style>\n", | |
"<table border=\"1\" class=\"dataframe\">\n", | |
" <thead>\n", | |
" <tr style=\"text-align: right;\">\n", | |
" <th>Predicted</th>\n", | |
" <th>0</th>\n", | |
" <th>1</th>\n", | |
" <th>All</th>\n", | |
" </tr>\n", | |
" <tr>\n", | |
" <th>Actual</th>\n", | |
" <th></th>\n", | |
" <th></th>\n", | |
" <th></th>\n", | |
" </tr>\n", | |
" </thead>\n", | |
" <tbody>\n", | |
" <tr>\n", | |
" <th>0</th>\n", | |
" <td>51</td>\n", | |
" <td>37</td>\n", | |
" <td>88</td>\n", | |
" </tr>\n", | |
" <tr>\n", | |
" <th>1</th>\n", | |
" <td>33</td>\n", | |
" <td>209</td>\n", | |
" <td>242</td>\n", | |
" </tr>\n", | |
" <tr>\n", | |
" <th>All</th>\n", | |
" <td>84</td>\n", | |
" <td>246</td>\n", | |
" <td>330</td>\n", | |
" </tr>\n", | |
" </tbody>\n", | |
"</table>\n", | |
"</div>" | |
], | |
"text/plain": [ | |
"Predicted 0 1 All\n", | |
"Actual \n", | |
"0 51 37 88\n", | |
"1 33 209 242\n", | |
"All 84 246 330" | |
] | |
}, | |
"metadata": {}, | |
"output_type": "display_data" | |
}, | |
{ | |
"name": "stdout", | |
"output_type": "stream", | |
"text": [ | |
"Accuracy: 0.788\n", | |
"Boostrapped accuracy 95 % interval 0.733333333333 - 0.842424242424\n" | |
] | |
} | |
], | |
"source": [ | |
"compare_on_dataset(\n", | |
" 'https://onlinecourses.science.psu.edu/stat857/sites/onlinecourses.science.psu.edu.stat857/files/german_credit.csv',\n", | |
" target_variable='creditability',\n", | |
" lr=0.001,\n", | |
" patience=5\n", | |
")" | |
] | |
}, | |
{ | |
"cell_type": "code", | |
"execution_count": null, | |
"metadata": { | |
"collapsed": true | |
}, | |
"outputs": [], | |
"source": [] | |
} | |
], | |
"metadata": { | |
"anaconda-cloud": {}, | |
"kernelspec": { | |
"display_name": "Python [default]", | |
"language": "python", | |
"name": "python2" | |
}, | |
"language_info": { | |
"codemirror_mode": { | |
"name": "ipython", | |
"version": 2 | |
}, | |
"file_extension": ".py", | |
"mimetype": "text/x-python", | |
"name": "python", | |
"nbconvert_exporter": "python", | |
"pygments_lexer": "ipython2", | |
"version": "2.7.12" | |
} | |
}, | |
"nbformat": 4, | |
"nbformat_minor": 2 | |
} |