Skip to content

Instantly share code, notes, and snippets.

@gthb
Created December 12, 2019 11:42
Show Gist options
  • Save gthb/0498ea482a4d19df1b43dd74d93a1262 to your computer and use it in GitHub Desktop.
Save gthb/0498ea482a4d19df1b43dd74d93a1262 to your computer and use it in GitHub Desktop.
tensorflow-issue-34827
Display the source blob
Display the rendered blob
Raw
{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"name": "Untitled257.ipynb",
"provenance": [],
"collapsed_sections": [],
"include_colab_link": true
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
}
},
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "view-in-github",
"colab_type": "text"
},
"source": [
"<a href=\"https://colab.research.google.com/gist/gthb/ffcdd8447683d7edcb8fef5e0a8bc170/untitled257.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"cell_type": "code",
"metadata": {
"id": "d2_p4lFmhlsc",
"colab_type": "code",
"outputId": "8ff8ff2f-1ce1-40c2-839c-8cf10b8d57ca",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 1000
}
},
"source": [
"!pip install tensorflow==2.0.0"
],
"execution_count": 0,
"outputs": [
{
"output_type": "stream",
"text": [
"Collecting tensorflow==2.0.0\n",
"\u001b[?25l Downloading https://files.pythonhosted.org/packages/46/0f/7bd55361168bb32796b360ad15a25de6966c9c1beb58a8e30c01c8279862/tensorflow-2.0.0-cp36-cp36m-manylinux2010_x86_64.whl (86.3MB)\n",
"\u001b[K |████████████████████████████████| 86.3MB 40kB/s \n",
"\u001b[?25hRequirement already satisfied: google-pasta>=0.1.6 in /usr/local/lib/python3.6/dist-packages (from tensorflow==2.0.0) (0.1.8)\n",
"Requirement already satisfied: six>=1.10.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow==2.0.0) (1.12.0)\n",
"Requirement already satisfied: keras-preprocessing>=1.0.5 in /usr/local/lib/python3.6/dist-packages (from tensorflow==2.0.0) (1.1.0)\n",
"Requirement already satisfied: grpcio>=1.8.6 in /usr/local/lib/python3.6/dist-packages (from tensorflow==2.0.0) (1.15.0)\n",
"Requirement already satisfied: numpy<2.0,>=1.16.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow==2.0.0) (1.17.4)\n",
"Requirement already satisfied: absl-py>=0.7.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow==2.0.0) (0.8.1)\n",
"Collecting tensorflow-estimator<2.1.0,>=2.0.0\n",
"\u001b[?25l Downloading https://files.pythonhosted.org/packages/fc/08/8b927337b7019c374719145d1dceba21a8bb909b93b1ad6f8fb7d22c1ca1/tensorflow_estimator-2.0.1-py2.py3-none-any.whl (449kB)\n",
"\u001b[K |████████████████████████████████| 450kB 32.1MB/s \n",
"\u001b[?25hRequirement already satisfied: protobuf>=3.6.1 in /usr/local/lib/python3.6/dist-packages (from tensorflow==2.0.0) (3.10.0)\n",
"Requirement already satisfied: opt-einsum>=2.3.2 in /usr/local/lib/python3.6/dist-packages (from tensorflow==2.0.0) (3.1.0)\n",
"Requirement already satisfied: wrapt>=1.11.1 in /usr/local/lib/python3.6/dist-packages (from tensorflow==2.0.0) (1.11.2)\n",
"Requirement already satisfied: wheel>=0.26 in /usr/local/lib/python3.6/dist-packages (from tensorflow==2.0.0) (0.33.6)\n",
"Requirement already satisfied: termcolor>=1.1.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow==2.0.0) (1.1.0)\n",
"Requirement already satisfied: gast==0.2.2 in /usr/local/lib/python3.6/dist-packages (from tensorflow==2.0.0) (0.2.2)\n",
"Requirement already satisfied: keras-applications>=1.0.8 in /usr/local/lib/python3.6/dist-packages (from tensorflow==2.0.0) (1.0.8)\n",
"Collecting tensorboard<2.1.0,>=2.0.0\n",
"\u001b[?25l Downloading https://files.pythonhosted.org/packages/76/54/99b9d5d52d5cb732f099baaaf7740403e83fe6b0cedde940fabd2b13d75a/tensorboard-2.0.2-py3-none-any.whl (3.8MB)\n",
"\u001b[K |████████████████████████████████| 3.8MB 48.5MB/s \n",
"\u001b[?25hRequirement already satisfied: astor>=0.6.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow==2.0.0) (0.8.0)\n",
"Requirement already satisfied: setuptools in /usr/local/lib/python3.6/dist-packages (from protobuf>=3.6.1->tensorflow==2.0.0) (42.0.2)\n",
"Requirement already satisfied: h5py in /usr/local/lib/python3.6/dist-packages (from keras-applications>=1.0.8->tensorflow==2.0.0) (2.8.0)\n",
"Requirement already satisfied: requests<3,>=2.21.0 in /usr/local/lib/python3.6/dist-packages (from tensorboard<2.1.0,>=2.0.0->tensorflow==2.0.0) (2.21.0)\n",
"Collecting google-auth<2,>=1.6.3\n",
"\u001b[?25l Downloading https://files.pythonhosted.org/packages/54/31/f944cbd5bdbcc90d5b36f0615036308c8ec1e41b4788da5b55d4900f6803/google_auth-1.8.2-py2.py3-none-any.whl (75kB)\n",
"\u001b[K |████████████████████████████████| 81kB 6.7MB/s \n",
"\u001b[?25hRequirement already satisfied: google-auth-oauthlib<0.5,>=0.4.1 in /usr/local/lib/python3.6/dist-packages (from tensorboard<2.1.0,>=2.0.0->tensorflow==2.0.0) (0.4.1)\n",
"Requirement already satisfied: werkzeug>=0.11.15 in /usr/local/lib/python3.6/dist-packages (from tensorboard<2.1.0,>=2.0.0->tensorflow==2.0.0) (0.16.0)\n",
"Requirement already satisfied: markdown>=2.6.8 in /usr/local/lib/python3.6/dist-packages (from tensorboard<2.1.0,>=2.0.0->tensorflow==2.0.0) (3.1.1)\n",
"Requirement already satisfied: urllib3<1.25,>=1.21.1 in /usr/local/lib/python3.6/dist-packages (from requests<3,>=2.21.0->tensorboard<2.1.0,>=2.0.0->tensorflow==2.0.0) (1.24.3)\n",
"Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.6/dist-packages (from requests<3,>=2.21.0->tensorboard<2.1.0,>=2.0.0->tensorflow==2.0.0) (2019.11.28)\n",
"Requirement already satisfied: chardet<3.1.0,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from requests<3,>=2.21.0->tensorboard<2.1.0,>=2.0.0->tensorflow==2.0.0) (3.0.4)\n",
"Requirement already satisfied: idna<2.9,>=2.5 in /usr/local/lib/python3.6/dist-packages (from requests<3,>=2.21.0->tensorboard<2.1.0,>=2.0.0->tensorflow==2.0.0) (2.8)\n",
"Requirement already satisfied: cachetools<3.2,>=2.0.0 in /usr/local/lib/python3.6/dist-packages (from google-auth<2,>=1.6.3->tensorboard<2.1.0,>=2.0.0->tensorflow==2.0.0) (3.1.1)\n",
"Requirement already satisfied: pyasn1-modules>=0.2.1 in /usr/local/lib/python3.6/dist-packages (from google-auth<2,>=1.6.3->tensorboard<2.1.0,>=2.0.0->tensorflow==2.0.0) (0.2.7)\n",
"Requirement already satisfied: rsa<4.1,>=3.1.4 in /usr/local/lib/python3.6/dist-packages (from google-auth<2,>=1.6.3->tensorboard<2.1.0,>=2.0.0->tensorflow==2.0.0) (4.0)\n",
"Requirement already satisfied: requests-oauthlib>=0.7.0 in /usr/local/lib/python3.6/dist-packages (from google-auth-oauthlib<0.5,>=0.4.1->tensorboard<2.1.0,>=2.0.0->tensorflow==2.0.0) (1.3.0)\n",
"Requirement already satisfied: pyasn1<0.5.0,>=0.4.6 in /usr/local/lib/python3.6/dist-packages (from pyasn1-modules>=0.2.1->google-auth<2,>=1.6.3->tensorboard<2.1.0,>=2.0.0->tensorflow==2.0.0) (0.4.8)\n",
"Requirement already satisfied: oauthlib>=3.0.0 in /usr/local/lib/python3.6/dist-packages (from requests-oauthlib>=0.7.0->google-auth-oauthlib<0.5,>=0.4.1->tensorboard<2.1.0,>=2.0.0->tensorflow==2.0.0) (3.1.0)\n",
"\u001b[31mERROR: tensorboard 2.0.2 has requirement grpcio>=1.24.3, but you'll have grpcio 1.15.0 which is incompatible.\u001b[0m\n",
"\u001b[31mERROR: google-colab 1.0.0 has requirement google-auth~=1.4.0, but you'll have google-auth 1.8.2 which is incompatible.\u001b[0m\n",
"Installing collected packages: tensorflow-estimator, google-auth, tensorboard, tensorflow\n",
" Found existing installation: tensorflow-estimator 1.15.1\n",
" Uninstalling tensorflow-estimator-1.15.1:\n",
" Successfully uninstalled tensorflow-estimator-1.15.1\n",
" Found existing installation: google-auth 1.4.2\n",
" Uninstalling google-auth-1.4.2:\n",
" Successfully uninstalled google-auth-1.4.2\n",
" Found existing installation: tensorboard 1.15.0\n",
" Uninstalling tensorboard-1.15.0:\n",
" Successfully uninstalled tensorboard-1.15.0\n",
" Found existing installation: tensorflow 1.15.0\n",
" Uninstalling tensorflow-1.15.0:\n",
" Successfully uninstalled tensorflow-1.15.0\n",
"Successfully installed google-auth-1.8.2 tensorboard-2.0.2 tensorflow-2.0.0 tensorflow-estimator-2.0.1\n"
],
"name": "stdout"
},
{
"output_type": "display_data",
"data": {
"application/vnd.colab-display-data+json": {
"pip_warning": {
"packages": [
"google"
]
}
}
},
"metadata": {
"tags": []
}
}
]
},
{
"cell_type": "code",
"metadata": {
"id": "L7Vz_hSThrkW",
"colab_type": "code",
"outputId": "7d179dc7-ff54-444e-f431-24ac8d33a71d",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 1000
}
},
"source": [
"import gc\n",
"import time\n",
"from itertools import islice\n",
"import tracemalloc\n",
"from os.path import basename\n",
"\n",
"import tensorflow as tf\n",
"import numpy as np\n",
"\n",
"CUMULATIVE = True # set to False to just compare to previous snapshot each time\n",
"TRACEMALLOC_EVERY = 1000\n",
"TRACEMALLOC_IGNORE = \"tracemalloc.py:\", \"inspect.py:\"\n",
"\n",
"input_tensor = tf.keras.Input(shape=(3,), name='input')\n",
"output_tensor = tf.keras.layers.Dense(3, name='output')(input_tensor)\n",
"model = tf.keras.Model(inputs=[input_tensor], outputs=[output_tensor])\n",
"\n",
"model.compile(optimizer='adam',\n",
" loss='categorical_crossentropy',\n",
" metrics=['categorical_accuracy'])\n",
"\n",
"\n",
"def random_input():\n",
" return np.random.random_sample([1, 3])\n",
"\n",
"model.predict_on_batch(random_input())\n",
"\n",
"tracemalloc.start(50)\n",
"\n",
"already_printed_from_output_shape_cache = []\n",
"\n",
"# NOTE: locals() returns a dict, and getattr() looks up *attributes*, not keys,\n",
"# so getattr(locals(), '__file__', ...) always returned the default. Use\n",
"# globals().get() so __file__ is actually found when running as a script.\n",
"# '<ipython-input' is a deliberate prefix: Colab/IPython frame names look like\n",
"# <ipython-input-21-...>, and snip_traceback matches via substring.\n",
"current_frame_name = globals().get('__file__', '<ipython-input')\n",
"\n",
"def snip_traceback(tb):\n",
" \"\"\"Yield all frames from the traceback up to and including this file/cell\"\"\"\n",
" for frame in tb:\n",
" yield frame\n",
" if current_frame_name in str(frame):\n",
" break\n",
"\n",
"predictions_since_first = 0\n",
"first_snapshot = last_snapshot = None\n",
"\n",
"while True:\n",
" print(\"Doing %d predictions...\" % TRACEMALLOC_EVERY)\n",
" for _ in range(TRACEMALLOC_EVERY):\n",
" model.predict_on_batch(random_input())\n",
"\n",
" gc.collect()\n",
" time.sleep(0.1)\n",
" snapshot = tracemalloc.take_snapshot()\n",
" if first_snapshot is None:\n",
" first_snapshot = last_snapshot = snapshot\n",
" continue\n",
"\n",
" predictions_since_first += TRACEMALLOC_EVERY\n",
" top_stats = snapshot.compare_to(first_snapshot if CUMULATIVE else last_snapshot, 'traceback')\n",
" last_snapshot = snapshot\n",
"\n",
" num_predictions = predictions_since_first if CUMULATIVE else TRACEMALLOC_EVERY\n",
" notable_mem_usage_diff = \"\\n\".join(\n",
" f\"{tracemalloc._format_size(stat.size_diff, True):10} \"\n",
" f\"({tracemalloc._format_size(stat.size_diff // num_predictions, True):7} per prediction) \"\n",
" f\"{stat.count_diff:+4} objs \"\n",
" f\"({stat.count_diff / num_predictions:.2f} per prediction) at:\\n\"\n",
" + \"\\n\".join(\" %s\" % tb for tb in snip_traceback(stat.traceback))\n",
" for stat in islice(\n",
" (s for s in top_stats\n",
" if not any(i in str(tb) for tb in s.traceback for i in TRACEMALLOC_IGNORE)\n",
" and any('tensorflow' in str(tb) for tb in s.traceback)\n",
" and abs(s.size_diff) // num_predictions > 0\n",
" ),\n",
" 3\n",
" )\n",
" )\n",
" if notable_mem_usage_diff:\n",
" print(\"Notable memory usage diffs over %d predictions:\\n%s\\n\" % (num_predictions, notable_mem_usage_diff))\n",
" else:\n",
" print(\"No notable memory usage diff over %d predictions\\n\" % (num_predictions,))\n",
" new_cache_elements = [item for item in model._output_shape_cache.items()\n",
" if item not in already_printed_from_output_shape_cache]\n",
" if new_cache_elements:\n",
" print(\"model._output_shape_cache has grown to %d, new elements are:\\n%s\\n\" % (\n",
" len(model._output_shape_cache),\n",
" \"\\n\".join(str(item) for item in new_cache_elements)\n",
" ))\n",
" already_printed_from_output_shape_cache.extend(new_cache_elements)"
],
"execution_count": 0,
"outputs": [
{
"output_type": "stream",
"text": [
"Doing 1000 predictions...\n",
"Doing 1000 predictions...\n",
"No notable memory usage diff over 1000 predictions\n",
"\n",
"model._output_shape_cache has grown to 8, new elements are:\n",
"('140167134436824', TensorShape([None, 3]))\n",
"('140166965061448', TensorShape([None, 3]))\n",
"('140166965063464', TensorShape([None, 3]))\n",
"('140166965062904', TensorShape([None, 3]))\n",
"('140166965062792', TensorShape([None, 3]))\n",
"('140166965063240', TensorShape([None, 3]))\n",
"('140167293004936', TensorShape([None, 3]))\n",
"('140167280454680', TensorShape([None, 3]))\n",
"\n",
"Doing 1000 predictions...\n",
"No notable memory usage diff over 2000 predictions\n",
"\n",
"model._output_shape_cache has grown to 12, new elements are:\n",
"('140167280454568', TensorShape([None, 3]))\n",
"('140167280455576', TensorShape([None, 3]))\n",
"('140167280455016', TensorShape([None, 3]))\n",
"('140167280455184', TensorShape([None, 3]))\n",
"\n",
"Doing 1000 predictions...\n",
"No notable memory usage diff over 3000 predictions\n",
"\n",
"model._output_shape_cache has grown to 15, new elements are:\n",
"('140167289454432', TensorShape([None, 3]))\n",
"('140166959705056', TensorShape([None, 3]))\n",
"('140167144974152', TensorShape([None, 3]))\n",
"\n",
"Doing 1000 predictions...\n",
"No notable memory usage diff over 4000 predictions\n",
"\n",
"model._output_shape_cache has grown to 18, new elements are:\n",
"('140167192199296', TensorShape([None, 3]))\n",
"('140167192199744', TensorShape([None, 3]))\n",
"('140167192200808', TensorShape([None, 3]))\n",
"\n",
"Doing 1000 predictions...\n",
"No notable memory usage diff over 5000 predictions\n",
"\n",
"model._output_shape_cache has grown to 19, new elements are:\n",
"('140167192202936', TensorShape([None, 3]))\n",
"\n",
"Doing 1000 predictions...\n",
"No notable memory usage diff over 6000 predictions\n",
"\n",
"model._output_shape_cache has grown to 28, new elements are:\n",
"('140167192200192', TensorShape([None, 3]))\n",
"('140167192202768', TensorShape([None, 3]))\n",
"('140167192200360', TensorShape([None, 3]))\n",
"('140167192200528', TensorShape([None, 3]))\n",
"('140166959705672', TensorShape([None, 3]))\n",
"('140166959705280', TensorShape([None, 3]))\n",
"('140166959704944', TensorShape([None, 3]))\n",
"('140166959705000', TensorShape([None, 3]))\n",
"('140166959704384', TensorShape([None, 3]))\n",
"\n",
"Doing 1000 predictions...\n",
"No notable memory usage diff over 7000 predictions\n",
"\n",
"Doing 1000 predictions...\n",
"No notable memory usage diff over 8000 predictions\n",
"\n",
"model._output_shape_cache has grown to 31, new elements are:\n",
"('140167106518824', TensorShape([None, 3]))\n",
"('140167106515240', TensorShape([None, 3]))\n",
"('140167106515016', TensorShape([None, 3]))\n",
"\n",
"Doing 1000 predictions...\n"
],
"name": "stdout"
},
{
"output_type": "error",
"ename": "KeyboardInterrupt",
"evalue": "ignored",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m<ipython-input-21-01b68250fa1b>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m 71\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0mabs\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0ms\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msize_diff\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m//\u001b[0m \u001b[0mnum_predictions\u001b[0m \u001b[0;34m>\u001b[0m \u001b[0;36m0\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 72\u001b[0m ),\n\u001b[0;32m---> 73\u001b[0;31m \u001b[0;36m3\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 74\u001b[0m )\n\u001b[1;32m 75\u001b[0m )\n",
"\u001b[0;32m<ipython-input-21-01b68250fa1b>\u001b[0m in \u001b[0;36m<genexpr>\u001b[0;34m(.0)\u001b[0m\n\u001b[1;32m 60\u001b[0m \u001b[0mnum_predictions\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mpredictions_since_first\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mCUMULATIVE\u001b[0m \u001b[0;32melse\u001b[0m \u001b[0mTRACEMALLOC_EVERY\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 61\u001b[0m notable_mem_usage_diff = \"\\n\".join(\n\u001b[0;32m---> 62\u001b[0;31m \u001b[0;34mf\"{tracemalloc._format_size(stat.size_diff, True):10} \"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 63\u001b[0m \u001b[0;34mf\"({tracemalloc._format_size(stat.size_diff // num_predictions, True):7} per prediction) \"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 64\u001b[0m \u001b[0;34mf\"{stat.count_diff:+4} objs \"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m<ipython-input-21-01b68250fa1b>\u001b[0m in \u001b[0;36m<genexpr>\u001b[0;34m(.0)\u001b[0m\n\u001b[1;32m 67\u001b[0m for stat in islice(\n\u001b[1;32m 68\u001b[0m (s for s in top_stats\n\u001b[0;32m---> 69\u001b[0;31m \u001b[0;32mif\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0many\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mi\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mstr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtb\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mtb\u001b[0m \u001b[0;32min\u001b[0m \u001b[0ms\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtraceback\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mi\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mTRACEMALLOC_IGNORE\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 70\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0many\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'tensorflow'\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mstr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtb\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mtb\u001b[0m \u001b[0;32min\u001b[0m \u001b[0ms\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtraceback\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 71\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0mabs\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0ms\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msize_diff\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m//\u001b[0m \u001b[0mnum_predictions\u001b[0m \u001b[0;34m>\u001b[0m \u001b[0;36m0\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m<ipython-input-21-01b68250fa1b>\u001b[0m in \u001b[0;36m<genexpr>\u001b[0;34m(.0)\u001b[0m\n\u001b[1;32m 67\u001b[0m for stat in islice(\n\u001b[1;32m 68\u001b[0m (s for s in top_stats\n\u001b[0;32m---> 69\u001b[0;31m \u001b[0;32mif\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0many\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mi\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mstr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtb\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mtb\u001b[0m \u001b[0;32min\u001b[0m \u001b[0ms\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtraceback\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mi\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mTRACEMALLOC_IGNORE\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 70\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0many\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'tensorflow'\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mstr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtb\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mtb\u001b[0m \u001b[0;32min\u001b[0m \u001b[0ms\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtraceback\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 71\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0mabs\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0ms\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msize_diff\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m//\u001b[0m \u001b[0mnum_predictions\u001b[0m \u001b[0;34m>\u001b[0m \u001b[0;36m0\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/usr/lib/python3.6/tracemalloc.py\u001b[0m in \u001b[0;36m__str__\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 163\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 164\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m__str__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 165\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0;34m\"%s:%s\"\u001b[0m \u001b[0;34m%\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfilename\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mlineno\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 166\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 167\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m__repr__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mKeyboardInterrupt\u001b[0m: "
]
}
]
},
{
"cell_type": "code",
"metadata": {
"id": "M0oHRhIgjGvw",
"colab_type": "code",
"colab": {}
},
"source": [
""
],
"execution_count": 0,
"outputs": []
}
]
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment