test_custom_metric_corr.ipynb (gist by @renxida, created November 19, 2018)
{
"cells": [
{
"metadata": {
"trusted": true
},
"cell_type": "code",
"source": "from keras.models import Model\nfrom keras.layers import Input, Dense, Conv1D, Flatten, Concatenate\nfrom keras import regularizers\nimport logging\nlogger = logging.getLogger('replicate_talos_float_hyperparameter_error')",
"execution_count": 1,
"outputs": [
{
"output_type": "stream",
"text": "Using TensorFlow backend.\n",
"name": "stderr"
}
]
},
{
"metadata": {
"trusted": true
},
"cell_type": "code",
"source": "import talos as ta\nimport keras\nimport numpy as np",
"execution_count": 2,
"outputs": []
},
{
"metadata": {
"trusted": true
},
"cell_type": "code",
"source": "def make_net(input_layer, params):\n net = input_layer\n\n if params.get('conv_count', 0) !=0:\n for _ in range(params['conv_count']):\n newConv = Conv1D(\n activation='relu',\n filters=params['conv_filters'],\n kernel_size=params['conv_kernel_size'],\n kernel_regularizer=regularizers.l2(float(params['l2reg'])))\n net = newConv(net)\n # optional pooling\n if params.get('pool_size', None) is not None:\n newPool = keras.layers.MaxPooling1D(\n pool_size=params['pool_size'],\n strides=params.get(\n 'pool_strides',\n params['pool_size']),\n )\n net = newPool(net)\n\n net = Flatten()(net)\n \n if params.get('dense_count', 0) !=0:\n for dense_num in range(params['dense_count']):\n newDense = Dense(\n units=params['dense_units'],\n activation='relu',\n kernel_regularizer=regularizers.l2(float(params['l2reg'])),\n )\n net = newDense(net)\n\n return net\n\ndef make_optimizer(params):\n opzr = keras.optimizers.Adam(**params.get('optimizer', {}))\n return opzr\n\nimport keras.backend as K\ndef model(params):\n \n \n def metric_correlation(y_true, y_pred):\n logger.warn(f'[metric_correlation] y_true size: {y_true.shape}, y_predy size: {y_pred.shape}')\n\n def m(x, w):\n \"\"\"Weighted Mean\"\"\"\n return K.sum(x*w) / K.sum(w)\n def cov(x, y, w):\n \"\"\"Weighted Covariance\"\"\"\n return K.sum(w* (x - m(x, w))*(y - m(y, w)))/ K.sum(w)\n\n def corr(x, y, w):\n \"\"\"Weighted Correlation\"\"\"\n return cov(x, y, w) / K.sqrt(cov(x, x, w) * cov(y, y, w))\n print(y_true.get_shape())\n return corr(y_true, y_pred, K.ones_like(y_true))\n inputs = []\n\n subnets = []\n\n\n inpt = Input(shape=(30,), dtype='float32', name='returns_input')\n inputs.append(inpt)\n\n net = make_net(inpt, params)\n \n condenser = Dense(units=1, activation=None, use_bias=False)\n net = condenser(net)\n \n model = Model(inputs=inputs, outputs = net)\n\n opzr = make_optimizer(params)\n model.compile(\n loss='mean_squared_error',\n optimizer=opzr,\n metrics=[metric_correlation])\n return model",
"execution_count": 3,
"outputs": []
},
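{
"metadata": {},
"cell_type": "markdown",
"source": "A quick sanity check, added as a sketch (not part of the original run): with unit weights, the weighted correlation above reduces to the ordinary Pearson correlation, so a NumPy mirror of it can be checked against `np.corrcoef`. The same mirror also shows where the `nan` values seen later come from: if either input is constant over the batch, `cov(x, x, w)` or `cov(y, y, w)` is zero and `corr` divides zero by zero."
},
{
"metadata": {
"trusted": true
},
"cell_type": "code",
"source": "# Sketch only: a NumPy mirror of metric_correlation with unit weights.\nimport numpy as np\n\ndef np_weighted_corr(x, y, w):\n    m = lambda v: np.sum(v * w) / np.sum(w)  # weighted mean\n    cov = lambda a, b: np.sum(w * (a - m(a)) * (b - m(b))) / np.sum(w)\n    return cov(x, y) / np.sqrt(cov(x, x) * cov(y, y))\n\nx = np.random.normal(size=100)\ny = 0.5 * x + np.random.normal(size=100)\nw = np.ones_like(x)\nprint(np_weighted_corr(x, y, w))    # should match np.corrcoef below\nprint(np.corrcoef(x, y)[0, 1])\nprint(np_weighted_corr(x, np.zeros_like(x), w))  # constant input -> nan (0/0)",
"execution_count": null,
"outputs": []
},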
{
"metadata": {
"trusted": true
},
"cell_type": "code",
"source": "def model_ta(x_train, y_train, x_val, y_val, params):\n print(params)\n # replace the hyperparameter inputs with references to params dictionary \n mdl = model(params)\n \n history_callback = mdl.fit(x_train, y_train,\n batch_size=params['batch_size'],\n epochs=params['epochs'],\n verbose=1,\n validation_data=[x_val, y_val])\n history_callback.history['val_acc']=[42]\n # modify the output model\n return history_callback, mdl",
"execution_count": 4,
"outputs": []
},
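{
"metadata": {},
"cell_type": "markdown",
"source": "To exercise the wrapper outside of a Talos scan, the model can be built directly from a hand-written params dict. This is a sketch, not part of the original run; the hyperparameter values are assumptions chosen to match the search space below."
},
{
"metadata": {
"trusted": true
},
"cell_type": "code",
"source": "# Sketch only: build one model by hand and inspect it (values are assumptions).\nexample_params = {\n    'dense_count': 2,\n    'dense_units': 50,\n    'conv_count': 0,\n    'l2reg': 1,\n}\nmdl = model(example_params)\nmdl.summary()  # 30-dim input -> two ReLU dense layers -> single linear unit",
"execution_count": null,
"outputs": []
},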
{
"metadata": {},
"cell_type": "markdown",
"source": "## Compare this:"
},
{
"metadata": {
"scrolled": true,
"trusted": true
},
"cell_type": "code",
"source": "h = ta.Scan(x=np.random.normal(loc=0.0, scale=1.0, size=(5000, 30)),\n y=np.random.normal(loc=0.0, scale=1.0, size=(5000, 1)), \n params={\n 'dense_count': [1, 2, 3],\n 'dense_units': [50, 100, 150, 200],\n 'conv_count': [0],\n 'l2reg': [1, 100, 1000],\n 'epochs':[4],\n 'batch_size':[32]\n },\n model=model_ta,\n grid_downsample=0.1)",
"execution_count": 7,
"outputs": [
{
"output_type": "stream",
"text": "\n 0%| | 0/3 [00:00<?, ?it/s]\u001b[A[metric_correlation] y_true size: (?, ?), y_predy size: (?, 1)\n",
"name": "stderr"
},
{
"output_type": "stream",
"text": "{'dense_count': 3, 'dense_units': 150, 'conv_count': 0, 'l2reg': 1000, 'epochs': 4, 'batch_size': 32}\n(?, ?)\nTrain on 3500 samples, validate on 1500 samples\nEpoch 1/4\n3500/3500 [==============================] - 1s 259us/step - loss: 131884.9432 - metric_correlation: 0.0067 - val_loss: 23579.2904 - val_metric_correlation: -0.0166\nEpoch 2/4\n3500/3500 [==============================] - 0s 53us/step - loss: 7208.9134 - metric_correlation: 0.0639 - val_loss: 831.7312 - val_metric_correlation: -0.0019\nEpoch 3/4\n3500/3500 [==============================] - 0s 53us/step - loss: 228.7956 - metric_correlation: 0.0391 - val_loss: 21.8662 - val_metric_correlation: 0.0192\nEpoch 4/4\n3500/3500 [==============================] - 0s 53us/step - loss: 6.4003 - metric_correlation: nan - val_loss: 1.4318 - val_metric_correlation: nan\n",
"name": "stdout"
},
{
"output_type": "stream",
"text": "\n 33%|███▎ | 1/3 [00:01<00:03, 1.81s/it]\u001b[A",
"name": "stderr"
},
{
"output_type": "stream",
"text": "{'dense_count': 1, 'dense_units': 50, 'conv_count': 0, 'l2reg': 1000, 'epochs': 4, 'batch_size': 32}\n",
"name": "stdout"
},
{
"output_type": "stream",
"text": "[metric_correlation] y_true size: (?, ?), y_predy size: (?, 1)\n",
"name": "stderr"
},
{
"output_type": "stream",
"text": "(?, ?)\nTrain on 3500 samples, validate on 1500 samples\nEpoch 1/4\n3500/3500 [==============================] - 0s 76us/step - loss: 21595.8081 - metric_correlation: 0.0228 - val_loss: 10063.0185 - val_metric_correlation: -0.0203\nEpoch 2/4\n3500/3500 [==============================] - 0s 39us/step - loss: 5302.4817 - metric_correlation: 0.0297 - val_loss: 2183.8746 - val_metric_correlation: -0.0241\nEpoch 3/4\n3500/3500 [==============================] - 0s 39us/step - loss: 1058.1092 - metric_correlation: 0.0322 - val_loss: 373.9524 - val_metric_correlation: -0.0126\nEpoch 4/4\n3500/3500 [==============================] - 0s 38us/step - loss: 166.2048 - metric_correlation: 0.0564 - val_loss: 50.1352 - val_metric_correlation: 0.0031\n",
"name": "stdout"
},
{
"output_type": "stream",
"text": "\n 67%|██████▋ | 2/3 [00:02<00:01, 1.55s/it]\u001b[A",
"name": "stderr"
},
{
"output_type": "stream",
"text": "{'dense_count': 2, 'dense_units': 50, 'conv_count': 0, 'l2reg': 1, 'epochs': 4, 'batch_size': 32}\n",
"name": "stdout"
},
{
"output_type": "stream",
"text": "[metric_correlation] y_true size: (?, ?), y_predy size: (?, 1)\n",
"name": "stderr"
},
{
"output_type": "stream",
"text": "(?, ?)\nTrain on 3500 samples, validate on 1500 samples\nEpoch 1/4\n3500/3500 [==============================] - 0s 90us/step - loss: 49.2693 - metric_correlation: -0.0028 - val_loss: 22.4776 - val_metric_correlation: -0.0102\nEpoch 2/4\n3500/3500 [==============================] - 0s 45us/step - loss: 11.9453 - metric_correlation: 0.0178 - val_loss: 5.2996 - val_metric_correlation: -0.0306\nEpoch 3/4\n3500/3500 [==============================] - 0s 46us/step - loss: 2.9990 - metric_correlation: 0.0244 - val_loss: 1.7058 - val_metric_correlation: 0.0152\nEpoch 4/4\n3500/3500 [==============================] - 0s 45us/step - loss: 1.2939 - metric_correlation: 0.0480 - val_loss: 1.1280 - val_metric_correlation: 0.0106\n",
"name": "stdout"
},
{
"output_type": "stream",
"text": "\n100%|██████████| 3/3 [00:03<00:00, 1.41s/it]\u001b[A\n\u001b[A",
"name": "stderr"
},
{
"output_type": "stream",
"text": "Scan Finished!\n",
"name": "stdout"
}
]
},
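{
"metadata": {},
"cell_type": "markdown",
"source": "Note how `metric_correlation` reaches `nan` in the first permutation: most likely, `l2reg=1000` drives the weights toward zero, the predictions become nearly constant, and the variance term in the metric underflows to zero, producing a zero-by-zero division. The per-permutation results can also be inspected directly; the cell below is a sketch that assumes the `Scan` object exposes a `data` DataFrame, as Talos versions of this era did."
},
{
"metadata": {
"trusted": true
},
"cell_type": "code",
"source": "# Sketch only: inspect the scan log (assumes Scan exposes a .data DataFrame).\nprint(h.data.columns.tolist())\nprint(h.data.sort_values('val_metric_correlation', ascending=False).head())",
"execution_count": null,
"outputs": []
},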
{
"metadata": {
"trusted": true
},
"cell_type": "code",
"source": "rpt = ta.Reporting(h)",
"execution_count": 8,
"outputs": []
},
{
"metadata": {
"trusted": true
},
"cell_type": "code",
"source": "rpt.correlate('metric_correlation')",
"execution_count": 9,
"outputs": [
{
"output_type": "execute_result",
"execution_count": 9,
"data": {
"text/plain": "epochs NaN\nconv_count NaN\nbatch_size NaN\nval_metric_correlation 0.332939\ndense_count -0.621059\nl2reg 0.783764\ndense_units -0.145971\nName: metric_correlation, dtype: float64"
},
"metadata": {}
}
]
}
],
"metadata": {
"gist": {
"id": "",
"data": {
"description": "test_custom_metric_corr.ipynb",
"public": true
}
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3",
"language": "python"
},
"language_info": {
"name": "python",
"version": "3.6.5",
"mimetype": "text/x-python",
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"pygments_lexer": "ipython3",
"nbconvert_exporter": "python",
"file_extension": ".py"
},
"toc": {
"nav_menu": {},
"number_sections": true,
"sideBar": true,
"skip_h1_title": false,
"base_numbering": 1,
"title_cell": "Table of Contents",
"title_sidebar": "Contents",
"toc_cell": false,
"toc_position": {},
"toc_section_display": true,
"toc_window_display": true
}
},
"nbformat": 4,
"nbformat_minor": 2
}