{
"cells": [
{
"metadata": {
"trusted": true
},
"cell_type": "code",
"source": "from keras.models import Model\nfrom keras.layers import Input, Dense, Conv1D, Flatten, Concatenate\nfrom keras import regularizers\nimport logging\nlogger = logging.getLogger('replicate_talos_float_hyperparameter_error')",
"execution_count": 1,
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": "Using TensorFlow backend.\n"
}
]
},
{
"metadata": {
"trusted": true
},
"cell_type": "code",
"source": "import talos as ta\nimport keras\nimport numpy as np",
"execution_count": 2,
"outputs": []
},
{
"metadata": {
"trusted": true
},
"cell_type": "code",
"source": "def make_net(input_layer, params):\n net = input_layer\n\n if params.get('conv_count', 0) !=0:\n for _ in range(params['conv_count']):\n newConv = Conv1D(\n activation='relu',\n filters=params['conv_filters'],\n kernel_size=params['conv_kernel_size'],\n kernel_regularizer=regularizers.l2(float(params['l2reg'])))\n net = newConv(net)\n # optional pooling\n if params.get('pool_size', None) is not None:\n newPool = keras.layers.MaxPooling1D(\n pool_size=params['pool_size'],\n strides=params.get(\n 'pool_strides',\n params['pool_size']),\n )\n net = newPool(net)\n\n net = Flatten()(net)\n \n if params.get('dense_count', 0) !=0:\n for dense_num in range(params['dense_count']):\n newDense = Dense(\n units=params['dense_units'],\n activation='relu',\n kernel_regularizer=regularizers.l2(float(params['l2reg'])),\n )\n net = newDense(net)\n\n return net\n\ndef make_optimizer(params):\n opzr = keras.optimizers.Adam(**params.get('optimizer', {}))\n return opzr\n\nimport keras.backend as K\ndef model(params):\n \n \n def metric_correlation(y_true, y_pred):\n logger.warn(f'[metric_correlation] y_true size: {y_true.shape}, y_predy size: {y_pred.shape}')\n\n def m(x, w):\n \"\"\"Weighted Mean\"\"\"\n return K.sum(x*w) / K.sum(w)\n def cov(x, y, w):\n \"\"\"Weighted Covariance\"\"\"\n return K.sum(w* (x - m(x, w))*(y - m(y, w)))/ K.sum(w)\n\n def corr(x, y, w):\n \"\"\"Weighted Correlation\"\"\"\n return cov(x, y, w) / K.sqrt(cov(x, x, w) * cov(y, y, w))\n print(y_true.get_shape())\n return corr(y_true, y_pred, K.ones_like(y_true))\n inputs = []\n\n subnets = []\n\n\n inpt = Input(shape=(30,), dtype='float32', name='returns_input')\n inputs.append(inpt)\n\n net = make_net(inpt, params)\n \n condenser = Dense(units=1, activation=None, use_bias=False)\n net = condenser(net)\n \n model = Model(inputs=inputs, outputs = net)\n\n opzr = make_optimizer(params)\n model.compile(\n loss='mean_squared_error',\n optimizer=opzr,\n metrics=[metric_correlation])\n return model",
"execution_count": 3,
"outputs": []
},
{
"metadata": {
"trusted": true
},
"cell_type": "code",
"source": "def model_ta(x_train, y_train, x_val, y_val, params):\n print(params)\n # replace the hyperparameter inputs with references to params dictionary \n mdl = model(params)\n \n history_callback = mdl.fit(x_train, y_train,\n batch_size=params['batch_size'],\n epochs=params['epochs'],\n verbose=1,\n validation_data=[x_val, y_val])\n history_callback.history['val_acc']=[42]\n # modify the output model\n return history_callback, mdl",
"execution_count": 4,
"outputs": []
},
{
"metadata": {},
"cell_type": "markdown",
"source": "## Compare this:"
},
{
"metadata": {
"scrolled": true,
"trusted": true
},
"cell_type": "code",
"source": "h = ta.Scan(x=np.random.normal(loc=0.0, scale=1.0, size=(5000, 30)),\n y=np.random.normal(loc=0.0, scale=1.0, size=(5000, 1)), \n params={\n 'dense_count': [1, 2, 3],\n 'dense_units': [50, 100, 150, 200],\n 'conv_count': [0],\n 'l2reg': [1, 100, 1000],\n 'epochs':[4],\n 'batch_size':[32]\n },\n model=model_ta,\n grid_downsample=0.1)",
"execution_count": 6,
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": " 0%| | 0/3 [00:00<?, ?it/s][metric_correlation] y_true size: (?, ?), y_predy size: (?, 1)\n"
},
{
"name": "stdout",
"output_type": "stream",
"text": "{'dense_count': 2, 'dense_units': 100, 'conv_count': 0, 'l2reg': 1000, 'epochs': 4, 'batch_size': 32}\n(?, ?)\nTrain on 3500 samples, validate on 1500 samples\nEpoch 1/4\n3500/3500 [==============================] - 1s 272us/step - loss: 66257.8525 - metric_correlation: -0.0098 - val_loss: 19129.0070 - val_metric_correlation: 0.0196\nEpoch 2/4\n3500/3500 [==============================] - 0s 47us/step - loss: 7494.6176 - metric_correlation: -0.0058 - val_loss: 1677.6988 - val_metric_correlation: 0.0192\nEpoch 3/4\n3500/3500 [==============================] - 0s 47us/step - loss: 589.8285 - metric_correlation: 0.0371 - val_loss: 104.9364 - val_metric_correlation: -0.0216\nEpoch 4/4\n3500/3500 [==============================] - 0s 47us/step - loss: 34.4601 - metric_correlation: 0.0458 - val_loss: 5.7446 - val_metric_correlation: -0.0188\n"
},
{
"name": "stderr",
"output_type": "stream",
"text": "\r 33%|███▎ | 1/3 [00:01<00:03, 1.80s/it]"
},
{
"name": "stdout",
"output_type": "stream",
"text": "{'dense_count': 3, 'dense_units': 50, 'conv_count': 0, 'l2reg': 100, 'epochs': 4, 'batch_size': 32}\n"
},
{
"name": "stderr",
"output_type": "stream",
"text": "[metric_correlation] y_true size: (?, ?), y_predy size: (?, 1)\n"
},
{
"name": "stdout",
"output_type": "stream",
"text": "(?, ?)\nTrain on 3500 samples, validate on 1500 samples\nEpoch 1/4\n3500/3500 [==============================] - 0s 111us/step - loss: 7383.3418 - metric_correlation: -0.0154 - val_loss: 3161.2359 - val_metric_correlation: 0.0060\nEpoch 2/4\n3500/3500 [==============================] - 0s 54us/step - loss: 1576.2417 - metric_correlation: 0.0149 - val_loss: 587.0849 - val_metric_correlation: -0.0154\nEpoch 3/4\n3500/3500 [==============================] - 0s 53us/step - loss: 267.8316 - metric_correlation: -0.0040 - val_loss: 84.4821 - val_metric_correlation: 0.0063\nEpoch 4/4\n3500/3500 [==============================] - 0s 53us/step - loss: 35.6537 - metric_correlation: 0.0049 - val_loss: 10.0422 - val_metric_correlation: 0.0116\n"
},
{
"name": "stderr",
"output_type": "stream",
"text": "\r 67%|██████▋ | 2/3 [00:03<00:01, 1.64s/it]"
},
{
"name": "stdout",
"output_type": "stream",
"text": "{'dense_count': 1, 'dense_units': 100, 'conv_count': 0, 'l2reg': 1, 'epochs': 4, 'batch_size': 32}\n"
},
{
"name": "stderr",
"output_type": "stream",
"text": "[metric_correlation] y_true size: (?, ?), y_predy size: (?, 1)\n"
},
{
"name": "stdout",
"output_type": "stream",
"text": "(?, ?)\nTrain on 3500 samples, validate on 1500 samples\nEpoch 1/4\n3500/3500 [==============================] - 0s 75us/step - loss: 24.6264 - metric_correlation: -0.0086 - val_loss: 9.5593 - val_metric_correlation: 5.8482e-04\nEpoch 2/4\n3500/3500 [==============================] - 0s 40us/step - loss: 4.7820 - metric_correlation: -9.4352e-04 - val_loss: 2.1288 - val_metric_correlation: 0.0042\nEpoch 3/4\n3500/3500 [==============================] - 0s 39us/step - loss: 1.4306 - metric_correlation: 0.0142 - val_loss: 1.1252 - val_metric_correlation: 0.0091\nEpoch 4/4\n3500/3500 [==============================] - 0s 39us/step - loss: 1.0338 - metric_correlation: 0.0243 - val_loss: 1.0342 - val_metric_correlation: -0.0225\n"
},
{
"name": "stderr",
"output_type": "stream",
"text": "\r100%|██████████| 3/3 [00:03<00:00, 1.42s/it]"
},
{
"name": "stdout",
"output_type": "stream",
"text": "Scan Finished!\n"
},
{
"name": "stderr",
"output_type": "stream",
"text": "\n"
}
]
},
{
"metadata": {},
"cell_type": "markdown",
"source": "## and this:"
},
{
"metadata": {
"trusted": true
},
"cell_type": "code",
"source": "h = ta.Scan(x=np.random.normal(loc=0.0, scale=1.0, size=(5000, 30)),\n y=np.random.normal(loc=0.0, scale=1.0, size=(5000, 1)), \n params={\n 'dense_count': [1, 2, 3],\n 'dense_units': [50, 100, 150, 200],\n 'conv_count': [0],\n 'l2reg': [1.0, 100, 1000],\n 'epochs':[4],\n 'batch_size':[32]\n },\n model=model_ta,\n grid_downsample=0.1)",
"execution_count": 8,
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": "\r 0%| | 0/3 [00:00<?, ?it/s]"
},
{
"name": "stdout",
"output_type": "stream",
"text": "{'dense_count': 1.0, 'dense_units': 100.0, 'conv_count': 0.0, 'l2reg': 1.0, 'epochs': 4.0, 'batch_size': 32.0}\n"
},
{
"ename": "TalosReturnError",
"evalue": "Make sure that input model returns 'out, model' where out is history object from model.fit()",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mTypeError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m~/anaconda3/lib/python3.6/site-packages/talos/scan/scan_round.py\u001b[0m in \u001b[0;36mscan_round\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 31\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 32\u001b[0;31m \u001b[0m_hr_out\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mkeras_model\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mingest_model\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 33\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mTypeError\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0merr\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m~/anaconda3/lib/python3.6/site-packages/talos/model/ingest_model.py\u001b[0m in \u001b[0;36mingest_model\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 9\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0my_val\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 10\u001b[0;31m self.round_params)\n\u001b[0m",
"\u001b[0;32m<ipython-input-4-f8f301590a82>\u001b[0m in \u001b[0;36mmodel_ta\u001b[0;34m(x_train, y_train, x_val, y_val, params)\u001b[0m\n\u001b[1;32m 3\u001b[0m \u001b[0;31m# replace the hyperparameter inputs with references to params dictionary\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 4\u001b[0;31m \u001b[0mmdl\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mmodel\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mparams\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 5\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m<ipython-input-3-8bdda1b805f8>\u001b[0m in \u001b[0;36mmodel\u001b[0;34m(params)\u001b[0m\n\u001b[1;32m 65\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 66\u001b[0;31m \u001b[0mnet\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mmake_net\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minpt\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mparams\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 67\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m<ipython-input-3-8bdda1b805f8>\u001b[0m in \u001b[0;36mmake_net\u001b[0;34m(input_layer, params)\u001b[0m\n\u001b[1;32m 24\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mparams\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mget\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'dense_count'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;36m0\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m!=\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 25\u001b[0;31m \u001b[0;32mfor\u001b[0m \u001b[0mdense_num\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mparams\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'dense_count'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 26\u001b[0m newDense = Dense(\n",
"\u001b[0;31mTypeError\u001b[0m: 'numpy.float64' object cannot be interpreted as an integer",
"\nDuring handling of the above exception, another exception occurred:\n",
"\u001b[0;31mTalosReturnError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m<ipython-input-8-33918308fd1b>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m 10\u001b[0m },\n\u001b[1;32m 11\u001b[0m \u001b[0mmodel\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mmodel_ta\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 12\u001b[0;31m grid_downsample=0.1)\n\u001b[0m",
"\u001b[0;32m~/anaconda3/lib/python3.6/site-packages/talos/scan/Scan.py\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, x, y, params, model, dataset_name, experiment_no, x_val, y_val, val_split, shuffle, round_limit, grid_downsample, random_method, seed, search_method, reduction_method, reduction_interval, reduction_window, reduction_threshold, reduction_metric, reduce_loss, last_epoch_value, clear_tf_session, disable_progress_bar, print_params, debug)\u001b[0m\n\u001b[1;32m 161\u001b[0m \u001b[0;31m# input parameters section ends\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 162\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 163\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_null\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mruntime\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 164\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 165\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mruntime\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m~/anaconda3/lib/python3.6/site-packages/talos/scan/Scan.py\u001b[0m in \u001b[0;36mruntime\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 166\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 167\u001b[0m \u001b[0mself\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mscan_prepare\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 168\u001b[0;31m \u001b[0mself\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mscan_run\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
"\u001b[0;32m~/anaconda3/lib/python3.6/site-packages/talos/scan/scan_run.py\u001b[0m in \u001b[0;36mscan_run\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 18\u001b[0m disable=self.disable_progress_bar)\n\u001b[1;32m 19\u001b[0m \u001b[0;32mwhile\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mparam_log\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m!=\u001b[0m \u001b[0;36m0\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 20\u001b[0;31m \u001b[0mself\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mscan_round\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 21\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpbar\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mupdate\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 22\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpbar\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mclose\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m~/anaconda3/lib/python3.6/site-packages/talos/scan/scan_round.py\u001b[0m in \u001b[0;36mscan_round\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 35\u001b[0m \u001b[0;32mraise\u001b[0m \u001b[0mTalosTypeError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"Activation should be as object and not string in params\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 36\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 37\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0mTalosReturnError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"Make sure that input model returns 'out, model' where out is history object from model.fit()\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 38\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 39\u001b[0m \u001b[0;31m# set end time and log\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mTalosReturnError\u001b[0m: Make sure that input model returns 'out, model' where out is history object from model.fit()"
]
}
]
},
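{
"metadata": {},
"cell_type": "markdown",
"source": "The traceback above ends in `TypeError: 'numpy.float64' object cannot be interpreted as an integer`, raised inside `range(params['dense_count'])`. A plausible mechanism (an assumption about how Talos builds its parameter grid, not verified against its source): if all parameter combinations are collected into one numpy array, a single float value is enough to promote every column to `float64`. The cell below is a minimal numpy-only sketch of that promotion; it does not call Talos.",
"execution_count": null,
"outputs": []
},
{
"metadata": {
"trusted": true
},
"cell_type": "code",
"source": "from itertools import product\nimport numpy as np\n\n# All-int parameter values keep an integer dtype in the combined grid ...\nint_grid = np.array(list(product([1, 2, 3], [1, 100, 1000])))\nprint(int_grid.dtype)\n\n# ... but a single float (1.0 instead of 1) promotes the whole array to float64,\n# so integer-valued params such as dense_count come back as numpy.float64\nmixed_grid = np.array(list(product([1, 2, 3], [1.0, 100, 1000])))\nprint(mixed_grid.dtype)\n\n# range() rejects the promoted value, matching the error above\ntry:\n    range(mixed_grid[0, 0])\nexcept TypeError as err:\n    print(err)",
"execution_count": null,
"outputs": []
},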
{
"metadata": {},
"cell_type": "markdown",
"source": "note that printing params gives a bunch of xxx.0s, indicating a float type."
}
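,
{
"metadata": {},
"cell_type": "markdown",
"source": "A possible workaround, sketched below under the assumption that the parameter names match the dictionaries above (this is not an official Talos fix): coerce the integer-valued hyperparameters back to `int` inside the model-building function before they reach `range()` or the layer constructors. `coerce_int_params` and `model_ta_fixed` are hypothetical helper names introduced here for illustration."
},
{
"metadata": {
"trusted": true
},
"cell_type": "code",
"source": "# Hypothetical helper, not part of Talos: cast integer-valued hyperparameters\n# back to int so that range(), Dense, Conv1D, and fit() receive proper ints.\nINT_PARAMS = ('dense_count', 'dense_units', 'conv_count', 'conv_filters',\n              'conv_kernel_size', 'pool_size', 'epochs', 'batch_size')\n\ndef coerce_int_params(params):\n    fixed = dict(params)\n    for key in INT_PARAMS:\n        if fixed.get(key) is not None:\n            fixed[key] = int(fixed[key])\n    return fixed\n\ndef model_ta_fixed(x_train, y_train, x_val, y_val, params):\n    params = coerce_int_params(params)\n    mdl = model(params)\n    history_callback = mdl.fit(x_train, y_train,\n                               batch_size=params['batch_size'],\n                               epochs=params['epochs'],\n                               verbose=1,\n                               validation_data=[x_val, y_val])\n    history_callback.history['val_acc'] = [42]\n    return history_callback, mdl",
"execution_count": null,
"outputs": []
}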
],
"metadata": {
"gist": {
"id": "",
"data": {
"description": "replicate_talos_float_err.ipynb",
"public": true
}
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3",
"language": "python"
},
"language_info": {
"name": "python",
"version": "3.6.5",
"mimetype": "text/x-python",
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"pygments_lexer": "ipython3",
"nbconvert_exporter": "python",
"file_extension": ".py"
},
"toc": {
"nav_menu": {},
"number_sections": true,
"sideBar": true,
"skip_h1_title": false,
"base_numbering": 1,
"title_cell": "Table of Contents",
"title_sidebar": "Contents",
"toc_cell": false,
"toc_position": {},
"toc_section_display": true,
"toc_window_display": true
}
},
"nbformat": 4,
"nbformat_minor": 2
}