Skip to content

Instantly share code, notes, and snippets.

@dusskapark
Last active January 4, 2022 05:13
Show Gist options
  • Save dusskapark/cdda3487ed913554247682b238bdebda to your computer and use it in GitHub Desktop.
Save dusskapark/cdda3487ed913554247682b238bdebda to your computer and use it in GitHub Desktop.
model maker generated saved_model test.ipynb
Display the source blob
Display the rendered blob
Raw
{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "view-in-github",
"colab_type": "text"
},
"source": [
"<a href=\"https://colab.research.google.com/gist/dusskapark/cdda3487ed913554247682b238bdebda/model-maker-generated-saved_model-test.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"cell_type": "markdown",
"source": [
"## Install TensorFlow2 Object Detection Dependencies"
],
"metadata": {
"id": "4tkgB2FIU1wX"
}
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "vhhdy8sg5X6v",
"outputId": "dd573fdd-015b-4dd8-c718-0a8c68580910"
},
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"Python 3.7.12\n"
]
}
],
"source": [
"import tensorflow\n",
"\n",
"#print(tensorflow.__version__)\n",
"!python3 --version"
]
},
{
"cell_type": "code",
"source": [
"import os\n",
"import pathlib\n",
"\n",
"# Clone the tensorflow models repository if it doesn't already exist\n",
"if \"models\" in pathlib.Path.cwd().parts:\n",
" while \"models\" in pathlib.Path.cwd().parts:\n",
" os.chdir('..')\n",
"elif not pathlib.Path('models').exists():\n",
" !git clone --depth 1 https://github.com/tensorflow/models"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "A6mEkdlfUxl2",
"outputId": "2c2155a6-b41f-4ae5-b4e7-4861561aa0ea"
},
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"Cloning into 'models'...\n",
"remote: Enumerating objects: 3222, done.\u001b[K\n",
"remote: Counting objects: 100% (3222/3222), done.\u001b[K\n",
"remote: Compressing objects: 100% (2737/2737), done.\u001b[K\n",
"remote: Total 3222 (delta 865), reused 1348 (delta 441), pack-reused 0\u001b[K\n",
"Receiving objects: 100% (3222/3222), 33.42 MiB | 19.14 MiB/s, done.\n",
"Resolving deltas: 100% (865/865), done.\n"
]
}
]
},
{
"cell_type": "code",
"source": [
"%%bash\n",
"# Install the Object Detection API\n",
"cd models/research/\n",
"protoc object_detection/protos/*.proto --python_out=.\n",
"cp object_detection/packages/tf2/setup.py .\n",
"python -m pip install ."
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "Fhae9nseU9C8",
"outputId": "e04eb6f4-1a44-4c27-e9e3-e55d5279ddc5"
},
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"Processing /content/models/research\n",
"Collecting avro-python3\n",
" Downloading avro-python3-1.10.2.tar.gz (38 kB)\n",
"Collecting apache-beam\n",
" Downloading apache_beam-2.35.0-cp37-cp37m-manylinux2010_x86_64.whl (9.9 MB)\n",
"Requirement already satisfied: pillow in /usr/local/lib/python3.7/dist-packages (from object-detection==0.1) (7.1.2)\n",
"Requirement already satisfied: lxml in /usr/local/lib/python3.7/dist-packages (from object-detection==0.1) (4.2.6)\n",
"Requirement already satisfied: matplotlib in /usr/local/lib/python3.7/dist-packages (from object-detection==0.1) (3.2.2)\n",
"Requirement already satisfied: Cython in /usr/local/lib/python3.7/dist-packages (from object-detection==0.1) (0.29.24)\n",
"Requirement already satisfied: contextlib2 in /usr/local/lib/python3.7/dist-packages (from object-detection==0.1) (0.5.5)\n",
"Collecting tf-slim\n",
" Downloading tf_slim-1.1.0-py2.py3-none-any.whl (352 kB)\n",
"Requirement already satisfied: six in /usr/local/lib/python3.7/dist-packages (from object-detection==0.1) (1.15.0)\n",
"Requirement already satisfied: pycocotools in /usr/local/lib/python3.7/dist-packages (from object-detection==0.1) (2.0.3)\n",
"Collecting lvis\n",
" Downloading lvis-0.5.3-py3-none-any.whl (14 kB)\n",
"Requirement already satisfied: scipy in /usr/local/lib/python3.7/dist-packages (from object-detection==0.1) (1.4.1)\n",
"Requirement already satisfied: pandas in /usr/local/lib/python3.7/dist-packages (from object-detection==0.1) (1.1.5)\n",
"Collecting tf-models-official>=2.5.1\n",
" Downloading tf_models_official-2.7.0-py2.py3-none-any.whl (1.8 MB)\n",
"Collecting tensorflow_io\n",
" Downloading tensorflow_io-0.23.1-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl (23.1 MB)\n",
"Requirement already satisfied: keras in /usr/local/lib/python3.7/dist-packages (from object-detection==0.1) (2.7.0)\n",
"Requirement already satisfied: numpy>=1.15.4 in /usr/local/lib/python3.7/dist-packages (from tf-models-official>=2.5.1->object-detection==0.1) (1.19.5)\n",
"Requirement already satisfied: tensorflow>=2.7.0 in /usr/local/lib/python3.7/dist-packages (from tf-models-official>=2.5.1->object-detection==0.1) (2.7.0)\n",
"Collecting seqeval\n",
" Downloading seqeval-1.2.2.tar.gz (43 kB)\n",
"Collecting sacrebleu\n",
" Downloading sacrebleu-2.0.0-py3-none-any.whl (90 kB)\n",
"Requirement already satisfied: psutil>=5.4.3 in /usr/local/lib/python3.7/dist-packages (from tf-models-official>=2.5.1->object-detection==0.1) (5.4.8)\n",
"Requirement already satisfied: google-api-python-client>=1.6.7 in /usr/local/lib/python3.7/dist-packages (from tf-models-official>=2.5.1->object-detection==0.1) (1.12.8)\n",
"Collecting tensorflow-text>=2.7.0\n",
" Downloading tensorflow_text-2.7.3-cp37-cp37m-manylinux2010_x86_64.whl (4.9 MB)\n",
"Requirement already satisfied: tensorflow-hub>=0.6.0 in /usr/local/lib/python3.7/dist-packages (from tf-models-official>=2.5.1->object-detection==0.1) (0.12.0)\n",
"Requirement already satisfied: gin-config in /usr/local/lib/python3.7/dist-packages (from tf-models-official>=2.5.1->object-detection==0.1) (0.5.0)\n",
"Collecting tensorflow-addons\n",
" Downloading tensorflow_addons-0.15.0-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl (1.1 MB)\n",
"Requirement already satisfied: oauth2client in /usr/local/lib/python3.7/dist-packages (from tf-models-official>=2.5.1->object-detection==0.1) (4.1.3)\n",
"Collecting py-cpuinfo>=3.3.0\n",
" Downloading py-cpuinfo-8.0.0.tar.gz (99 kB)\n",
"Collecting sentencepiece\n",
" Downloading sentencepiece-0.1.96-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (1.2 MB)\n",
"Requirement already satisfied: kaggle>=1.3.9 in /usr/local/lib/python3.7/dist-packages (from tf-models-official>=2.5.1->object-detection==0.1) (1.5.12)\n",
"Collecting opencv-python-headless\n",
" Downloading opencv_python_headless-4.5.5.62-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (47.7 MB)\n",
"Collecting tensorflow-model-optimization>=0.4.1\n",
" Downloading tensorflow_model_optimization-0.7.0-py2.py3-none-any.whl (213 kB)\n",
"Requirement already satisfied: tensorflow-datasets in /usr/local/lib/python3.7/dist-packages (from tf-models-official>=2.5.1->object-detection==0.1) (4.0.1)\n",
"Collecting pyyaml>=5.1\n",
" Downloading PyYAML-6.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl (596 kB)\n",
"Requirement already satisfied: google-auth>=1.16.0 in /usr/local/lib/python3.7/dist-packages (from google-api-python-client>=1.6.7->tf-models-official>=2.5.1->object-detection==0.1) (1.35.0)\n",
"Requirement already satisfied: uritemplate<4dev,>=3.0.0 in /usr/local/lib/python3.7/dist-packages (from google-api-python-client>=1.6.7->tf-models-official>=2.5.1->object-detection==0.1) (3.0.1)\n",
"Requirement already satisfied: google-api-core<2dev,>=1.21.0 in /usr/local/lib/python3.7/dist-packages (from google-api-python-client>=1.6.7->tf-models-official>=2.5.1->object-detection==0.1) (1.26.3)\n",
"Requirement already satisfied: httplib2<1dev,>=0.15.0 in /usr/local/lib/python3.7/dist-packages (from google-api-python-client>=1.6.7->tf-models-official>=2.5.1->object-detection==0.1) (0.17.4)\n",
"Requirement already satisfied: google-auth-httplib2>=0.0.3 in /usr/local/lib/python3.7/dist-packages (from google-api-python-client>=1.6.7->tf-models-official>=2.5.1->object-detection==0.1) (0.0.4)\n",
"Requirement already satisfied: protobuf>=3.12.0 in /usr/local/lib/python3.7/dist-packages (from google-api-core<2dev,>=1.21.0->google-api-python-client>=1.6.7->tf-models-official>=2.5.1->object-detection==0.1) (3.17.3)\n",
"Requirement already satisfied: pytz in /usr/local/lib/python3.7/dist-packages (from google-api-core<2dev,>=1.21.0->google-api-python-client>=1.6.7->tf-models-official>=2.5.1->object-detection==0.1) (2018.9)\n",
"Requirement already satisfied: packaging>=14.3 in /usr/local/lib/python3.7/dist-packages (from google-api-core<2dev,>=1.21.0->google-api-python-client>=1.6.7->tf-models-official>=2.5.1->object-detection==0.1) (21.3)\n",
"Requirement already satisfied: setuptools>=40.3.0 in /usr/local/lib/python3.7/dist-packages (from google-api-core<2dev,>=1.21.0->google-api-python-client>=1.6.7->tf-models-official>=2.5.1->object-detection==0.1) (57.4.0)\n",
"Requirement already satisfied: requests<3.0.0dev,>=2.18.0 in /usr/local/lib/python3.7/dist-packages (from google-api-core<2dev,>=1.21.0->google-api-python-client>=1.6.7->tf-models-official>=2.5.1->object-detection==0.1) (2.23.0)\n",
"Requirement already satisfied: googleapis-common-protos<2.0dev,>=1.6.0 in /usr/local/lib/python3.7/dist-packages (from google-api-core<2dev,>=1.21.0->google-api-python-client>=1.6.7->tf-models-official>=2.5.1->object-detection==0.1) (1.53.0)\n",
"Requirement already satisfied: pyasn1-modules>=0.2.1 in /usr/local/lib/python3.7/dist-packages (from google-auth>=1.16.0->google-api-python-client>=1.6.7->tf-models-official>=2.5.1->object-detection==0.1) (0.2.8)\n",
"Requirement already satisfied: cachetools<5.0,>=2.0.0 in /usr/local/lib/python3.7/dist-packages (from google-auth>=1.16.0->google-api-python-client>=1.6.7->tf-models-official>=2.5.1->object-detection==0.1) (4.2.4)\n",
"Requirement already satisfied: rsa<5,>=3.1.4 in /usr/local/lib/python3.7/dist-packages (from google-auth>=1.16.0->google-api-python-client>=1.6.7->tf-models-official>=2.5.1->object-detection==0.1) (4.8)\n",
"Requirement already satisfied: urllib3 in /usr/local/lib/python3.7/dist-packages (from kaggle>=1.3.9->tf-models-official>=2.5.1->object-detection==0.1) (1.24.3)\n",
"Requirement already satisfied: certifi in /usr/local/lib/python3.7/dist-packages (from kaggle>=1.3.9->tf-models-official>=2.5.1->object-detection==0.1) (2021.10.8)\n",
"Requirement already satisfied: tqdm in /usr/local/lib/python3.7/dist-packages (from kaggle>=1.3.9->tf-models-official>=2.5.1->object-detection==0.1) (4.62.3)\n",
"Requirement already satisfied: python-slugify in /usr/local/lib/python3.7/dist-packages (from kaggle>=1.3.9->tf-models-official>=2.5.1->object-detection==0.1) (5.0.2)\n",
"Requirement already satisfied: python-dateutil in /usr/local/lib/python3.7/dist-packages (from kaggle>=1.3.9->tf-models-official>=2.5.1->object-detection==0.1) (2.8.2)\n",
"Requirement already satisfied: pyparsing!=3.0.5,>=2.0.2 in /usr/local/lib/python3.7/dist-packages (from packaging>=14.3->google-api-core<2dev,>=1.21.0->google-api-python-client>=1.6.7->tf-models-official>=2.5.1->object-detection==0.1) (3.0.6)\n",
"Requirement already satisfied: pyasn1<0.5.0,>=0.4.6 in /usr/local/lib/python3.7/dist-packages (from pyasn1-modules>=0.2.1->google-auth>=1.16.0->google-api-python-client>=1.6.7->tf-models-official>=2.5.1->object-detection==0.1) (0.4.8)\n",
"Requirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0dev,>=2.18.0->google-api-core<2dev,>=1.21.0->google-api-python-client>=1.6.7->tf-models-official>=2.5.1->object-detection==0.1) (3.0.4)\n",
"Requirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0dev,>=2.18.0->google-api-core<2dev,>=1.21.0->google-api-python-client>=1.6.7->tf-models-official>=2.5.1->object-detection==0.1) (2.10)\n",
"Requirement already satisfied: tensorflow-estimator<2.8,~=2.7.0rc0 in /usr/local/lib/python3.7/dist-packages (from tensorflow>=2.7.0->tf-models-official>=2.5.1->object-detection==0.1) (2.7.0)\n",
"Requirement already satisfied: wrapt>=1.11.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow>=2.7.0->tf-models-official>=2.5.1->object-detection==0.1) (1.13.3)\n",
"Requirement already satisfied: gast<0.5.0,>=0.2.1 in /usr/local/lib/python3.7/dist-packages (from tensorflow>=2.7.0->tf-models-official>=2.5.1->object-detection==0.1) (0.4.0)\n",
"Requirement already satisfied: h5py>=2.9.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow>=2.7.0->tf-models-official>=2.5.1->object-detection==0.1) (3.1.0)\n",
"Requirement already satisfied: tensorboard~=2.6 in /usr/local/lib/python3.7/dist-packages (from tensorflow>=2.7.0->tf-models-official>=2.5.1->object-detection==0.1) (2.7.0)\n",
"Requirement already satisfied: typing-extensions>=3.6.6 in /usr/local/lib/python3.7/dist-packages (from tensorflow>=2.7.0->tf-models-official>=2.5.1->object-detection==0.1) (3.10.0.2)\n",
"Requirement already satisfied: grpcio<2.0,>=1.24.3 in /usr/local/lib/python3.7/dist-packages (from tensorflow>=2.7.0->tf-models-official>=2.5.1->object-detection==0.1) (1.42.0)\n",
"Requirement already satisfied: tensorflow-io-gcs-filesystem>=0.21.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow>=2.7.0->tf-models-official>=2.5.1->object-detection==0.1) (0.22.0)\n",
"Requirement already satisfied: google-pasta>=0.1.1 in /usr/local/lib/python3.7/dist-packages (from tensorflow>=2.7.0->tf-models-official>=2.5.1->object-detection==0.1) (0.2.0)\n",
"Requirement already satisfied: termcolor>=1.1.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow>=2.7.0->tf-models-official>=2.5.1->object-detection==0.1) (1.1.0)\n",
"Requirement already satisfied: flatbuffers<3.0,>=1.12 in /usr/local/lib/python3.7/dist-packages (from tensorflow>=2.7.0->tf-models-official>=2.5.1->object-detection==0.1) (2.0)\n",
"Requirement already satisfied: opt-einsum>=2.3.2 in /usr/local/lib/python3.7/dist-packages (from tensorflow>=2.7.0->tf-models-official>=2.5.1->object-detection==0.1) (3.3.0)\n",
"Requirement already satisfied: keras-preprocessing>=1.1.1 in /usr/local/lib/python3.7/dist-packages (from tensorflow>=2.7.0->tf-models-official>=2.5.1->object-detection==0.1) (1.1.2)\n",
"Requirement already satisfied: wheel<1.0,>=0.32.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow>=2.7.0->tf-models-official>=2.5.1->object-detection==0.1) (0.37.0)\n",
"Requirement already satisfied: libclang>=9.0.1 in /usr/local/lib/python3.7/dist-packages (from tensorflow>=2.7.0->tf-models-official>=2.5.1->object-detection==0.1) (12.0.0)\n",
"Requirement already satisfied: absl-py>=0.4.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow>=2.7.0->tf-models-official>=2.5.1->object-detection==0.1) (0.12.0)\n",
"Requirement already satisfied: astunparse>=1.6.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow>=2.7.0->tf-models-official>=2.5.1->object-detection==0.1) (1.6.3)\n",
"Requirement already satisfied: cached-property in /usr/local/lib/python3.7/dist-packages (from h5py>=2.9.0->tensorflow>=2.7.0->tf-models-official>=2.5.1->object-detection==0.1) (1.5.2)\n",
"Requirement already satisfied: tensorboard-data-server<0.7.0,>=0.6.0 in /usr/local/lib/python3.7/dist-packages (from tensorboard~=2.6->tensorflow>=2.7.0->tf-models-official>=2.5.1->object-detection==0.1) (0.6.1)\n",
"Requirement already satisfied: tensorboard-plugin-wit>=1.6.0 in /usr/local/lib/python3.7/dist-packages (from tensorboard~=2.6->tensorflow>=2.7.0->tf-models-official>=2.5.1->object-detection==0.1) (1.8.0)\n",
"Requirement already satisfied: markdown>=2.6.8 in /usr/local/lib/python3.7/dist-packages (from tensorboard~=2.6->tensorflow>=2.7.0->tf-models-official>=2.5.1->object-detection==0.1) (3.3.6)\n",
"Requirement already satisfied: werkzeug>=0.11.15 in /usr/local/lib/python3.7/dist-packages (from tensorboard~=2.6->tensorflow>=2.7.0->tf-models-official>=2.5.1->object-detection==0.1) (1.0.1)\n",
"Requirement already satisfied: google-auth-oauthlib<0.5,>=0.4.1 in /usr/local/lib/python3.7/dist-packages (from tensorboard~=2.6->tensorflow>=2.7.0->tf-models-official>=2.5.1->object-detection==0.1) (0.4.6)\n",
"Requirement already satisfied: requests-oauthlib>=0.7.0 in /usr/local/lib/python3.7/dist-packages (from google-auth-oauthlib<0.5,>=0.4.1->tensorboard~=2.6->tensorflow>=2.7.0->tf-models-official>=2.5.1->object-detection==0.1) (1.3.0)\n",
"Requirement already satisfied: importlib-metadata>=4.4 in /usr/local/lib/python3.7/dist-packages (from markdown>=2.6.8->tensorboard~=2.6->tensorflow>=2.7.0->tf-models-official>=2.5.1->object-detection==0.1) (4.8.2)\n",
"Requirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.7/dist-packages (from importlib-metadata>=4.4->markdown>=2.6.8->tensorboard~=2.6->tensorflow>=2.7.0->tf-models-official>=2.5.1->object-detection==0.1) (3.6.0)\n",
"Requirement already satisfied: oauthlib>=3.0.0 in /usr/local/lib/python3.7/dist-packages (from requests-oauthlib>=0.7.0->google-auth-oauthlib<0.5,>=0.4.1->tensorboard~=2.6->tensorflow>=2.7.0->tf-models-official>=2.5.1->object-detection==0.1) (3.1.1)\n",
"Requirement already satisfied: dm-tree~=0.1.1 in /usr/local/lib/python3.7/dist-packages (from tensorflow-model-optimization>=0.4.1->tf-models-official>=2.5.1->object-detection==0.1) (0.1.6)\n",
"Collecting proto-plus<2,>=1.7.1\n",
" Downloading proto_plus-1.19.8-py3-none-any.whl (45 kB)\n",
"Requirement already satisfied: pyarrow<7.0.0,>=0.15.1 in /usr/local/lib/python3.7/dist-packages (from apache-beam->object-detection==0.1) (3.0.0)\n",
"Requirement already satisfied: pydot<2,>=1.2.0 in /usr/local/lib/python3.7/dist-packages (from apache-beam->object-detection==0.1) (1.3.0)\n",
"Collecting requests<3.0.0dev,>=2.18.0\n",
" Downloading requests-2.27.0-py2.py3-none-any.whl (63 kB)\n",
"Collecting fastavro<2,>=0.21.4\n",
" Downloading fastavro-1.4.8-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (2.3 MB)\n",
"Requirement already satisfied: pymongo<4.0.0,>=3.8.0 in /usr/local/lib/python3.7/dist-packages (from apache-beam->object-detection==0.1) (3.12.1)\n",
"Requirement already satisfied: crcmod<2.0,>=1.7 in /usr/local/lib/python3.7/dist-packages (from apache-beam->object-detection==0.1) (1.7)\n",
"Collecting orjson<4.0\n",
" Downloading orjson-3.6.5-cp37-cp37m-manylinux_2_24_x86_64.whl (247 kB)\n",
"Collecting hdfs<3.0.0,>=2.1.0\n",
" Downloading hdfs-2.6.0-py3-none-any.whl (33 kB)\n",
"Collecting dill<0.3.2,>=0.3.1.1\n",
" Downloading dill-0.3.1.1.tar.gz (151 kB)\n",
"Requirement already satisfied: docopt in /usr/local/lib/python3.7/dist-packages (from hdfs<3.0.0,>=2.1.0->apache-beam->object-detection==0.1) (0.6.2)\n",
"Collecting protobuf>=3.12.0\n",
" Downloading protobuf-3.19.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (1.1 MB)\n",
"Requirement already satisfied: charset-normalizer~=2.0.0 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0dev,>=2.18.0->google-api-core<2dev,>=1.21.0->google-api-python-client>=1.6.7->tf-models-official>=2.5.1->object-detection==0.1) (2.0.8)\n",
"Requirement already satisfied: opencv-python>=4.1.0.25 in /usr/local/lib/python3.7/dist-packages (from lvis->object-detection==0.1) (4.1.2.30)\n",
"Requirement already satisfied: cycler>=0.10.0 in /usr/local/lib/python3.7/dist-packages (from lvis->object-detection==0.1) (0.11.0)\n",
"Requirement already satisfied: kiwisolver>=1.1.0 in /usr/local/lib/python3.7/dist-packages (from lvis->object-detection==0.1) (1.3.2)\n",
"Requirement already satisfied: text-unidecode>=1.3 in /usr/local/lib/python3.7/dist-packages (from python-slugify->kaggle>=1.3.9->tf-models-official>=2.5.1->object-detection==0.1) (1.3)\n",
"Collecting portalocker\n",
" Downloading portalocker-2.3.2-py2.py3-none-any.whl (15 kB)\n",
"Requirement already satisfied: tabulate>=0.8.9 in /usr/local/lib/python3.7/dist-packages (from sacrebleu->tf-models-official>=2.5.1->object-detection==0.1) (0.8.9)\n",
"Requirement already satisfied: regex in /usr/local/lib/python3.7/dist-packages (from sacrebleu->tf-models-official>=2.5.1->object-detection==0.1) (2019.12.20)\n",
"Collecting colorama\n",
" Downloading colorama-0.4.4-py2.py3-none-any.whl (16 kB)\n",
"Requirement already satisfied: scikit-learn>=0.21.3 in /usr/local/lib/python3.7/dist-packages (from seqeval->tf-models-official>=2.5.1->object-detection==0.1) (1.0.1)\n",
"Requirement already satisfied: threadpoolctl>=2.0.0 in /usr/local/lib/python3.7/dist-packages (from scikit-learn>=0.21.3->seqeval->tf-models-official>=2.5.1->object-detection==0.1) (3.0.0)\n",
"Requirement already satisfied: joblib>=0.11 in /usr/local/lib/python3.7/dist-packages (from scikit-learn>=0.21.3->seqeval->tf-models-official>=2.5.1->object-detection==0.1) (1.1.0)\n",
"Requirement already satisfied: typeguard>=2.7 in /usr/local/lib/python3.7/dist-packages (from tensorflow-addons->tf-models-official>=2.5.1->object-detection==0.1) (2.7.1)\n",
"Requirement already satisfied: future in /usr/local/lib/python3.7/dist-packages (from tensorflow-datasets->tf-models-official>=2.5.1->object-detection==0.1) (0.16.0)\n",
"Requirement already satisfied: promise in /usr/local/lib/python3.7/dist-packages (from tensorflow-datasets->tf-models-official>=2.5.1->object-detection==0.1) (2.3)\n",
"Requirement already satisfied: importlib-resources in /usr/local/lib/python3.7/dist-packages (from tensorflow-datasets->tf-models-official>=2.5.1->object-detection==0.1) (5.4.0)\n",
"Requirement already satisfied: attrs>=18.1.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow-datasets->tf-models-official>=2.5.1->object-detection==0.1) (21.2.0)\n",
"Requirement already satisfied: tensorflow-metadata in /usr/local/lib/python3.7/dist-packages (from tensorflow-datasets->tf-models-official>=2.5.1->object-detection==0.1) (1.4.0)\n",
"Collecting tensorflow-io-gcs-filesystem>=0.21.0\n",
" Downloading tensorflow_io_gcs_filesystem-0.23.1-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl (2.1 MB)\n",
"Building wheels for collected packages: object-detection, py-cpuinfo, dill, avro-python3, seqeval\n",
" Building wheel for object-detection (setup.py): started\n",
" Building wheel for object-detection (setup.py): finished with status 'done'\n",
" Created wheel for object-detection: filename=object_detection-0.1-py3-none-any.whl size=1683460 sha256=90626b7267b84e7e64bd9452109804aee5a27343400baafc5262aa5e262db1fd\n",
" Stored in directory: /tmp/pip-ephem-wheel-cache-u6jig4xm/wheels/fa/a4/d2/e9a5057e414fd46c8e543d2706cd836d64e1fcd9eccceb2329\n",
" Building wheel for py-cpuinfo (setup.py): started\n",
" Building wheel for py-cpuinfo (setup.py): finished with status 'done'\n",
" Created wheel for py-cpuinfo: filename=py_cpuinfo-8.0.0-py3-none-any.whl size=22258 sha256=7c9817cb01d9588896fd66fe410ae82f7da5a16781d1b445b07e4556754a4441\n",
" Stored in directory: /root/.cache/pip/wheels/d2/f1/1f/041add21dc9c4220157f1bd2bd6afe1f1a49524c3396b94401\n",
" Building wheel for dill (setup.py): started\n",
" Building wheel for dill (setup.py): finished with status 'done'\n",
" Created wheel for dill: filename=dill-0.3.1.1-py3-none-any.whl size=78546 sha256=12dce603acc99b251799c8e309cd58f338369be7a350371958dc80adbb8c0fd1\n",
" Stored in directory: /root/.cache/pip/wheels/a4/61/fd/c57e374e580aa78a45ed78d5859b3a44436af17e22ca53284f\n",
" Building wheel for avro-python3 (setup.py): started\n",
" Building wheel for avro-python3 (setup.py): finished with status 'done'\n",
" Created wheel for avro-python3: filename=avro_python3-1.10.2-py3-none-any.whl size=44009 sha256=afaa32a001891a4e5fd79b010efef47e20fa0c95b0601177b766aab3a168a4b0\n",
" Stored in directory: /root/.cache/pip/wheels/d6/e5/b1/6b151d9b535ee50aaa6ab27d145a0104b6df02e5636f0376da\n",
" Building wheel for seqeval (setup.py): started\n",
" Building wheel for seqeval (setup.py): finished with status 'done'\n",
" Created wheel for seqeval: filename=seqeval-1.2.2-py3-none-any.whl size=16181 sha256=82bcfa61cf66add6aacf4b4b23dad58809f9e45a4810ca92f8307096e3cbbaba\n",
" Stored in directory: /root/.cache/pip/wheels/05/96/ee/7cac4e74f3b19e3158dce26a20a1c86b3533c43ec72a549fd7\n",
"Successfully built object-detection py-cpuinfo dill avro-python3 seqeval\n",
"Installing collected packages: requests, protobuf, tensorflow-io-gcs-filesystem, portalocker, dill, colorama, tf-slim, tensorflow-text, tensorflow-model-optimization, tensorflow-addons, seqeval, sentencepiece, sacrebleu, pyyaml, py-cpuinfo, proto-plus, orjson, opencv-python-headless, hdfs, fastavro, tf-models-official, tensorflow-io, lvis, avro-python3, apache-beam, object-detection\n",
" Attempting uninstall: requests\n",
" Found existing installation: requests 2.23.0\n",
" Uninstalling requests-2.23.0:\n",
" Successfully uninstalled requests-2.23.0\n",
" Attempting uninstall: protobuf\n",
" Found existing installation: protobuf 3.17.3\n",
" Uninstalling protobuf-3.17.3:\n",
" Successfully uninstalled protobuf-3.17.3\n",
" Attempting uninstall: tensorflow-io-gcs-filesystem\n",
" Found existing installation: tensorflow-io-gcs-filesystem 0.22.0\n",
" Uninstalling tensorflow-io-gcs-filesystem-0.22.0:\n",
" Successfully uninstalled tensorflow-io-gcs-filesystem-0.22.0\n",
" Attempting uninstall: dill\n",
" Found existing installation: dill 0.3.4\n",
" Uninstalling dill-0.3.4:\n",
" Successfully uninstalled dill-0.3.4\n",
" Attempting uninstall: pyyaml\n",
" Found existing installation: PyYAML 3.13\n",
" Uninstalling PyYAML-3.13:\n",
" Successfully uninstalled PyYAML-3.13\n",
"Successfully installed apache-beam-2.35.0 avro-python3-1.10.2 colorama-0.4.4 dill-0.3.1.1 fastavro-1.4.8 hdfs-2.6.0 lvis-0.5.3 object-detection-0.1 opencv-python-headless-4.5.5.62 orjson-3.6.5 portalocker-2.3.2 proto-plus-1.19.8 protobuf-3.19.1 py-cpuinfo-8.0.0 pyyaml-6.0 requests-2.27.0 sacrebleu-2.0.0 sentencepiece-0.1.96 seqeval-1.2.2 tensorflow-addons-0.15.0 tensorflow-io-0.23.1 tensorflow-io-gcs-filesystem-0.23.1 tensorflow-model-optimization-0.7.0 tensorflow-text-2.7.3 tf-models-official-2.7.0 tf-slim-1.1.0\n"
]
},
{
"output_type": "stream",
"name": "stderr",
"text": [
" DEPRECATION: A future pip version will change local packages to be built in-place without first copying to a temporary directory. We recommend you use --use-feature=in-tree-build to test your packages with this new behavior before it becomes the default.\n",
" pip 21.3 will remove support for this functionality. You can find discussion regarding this at https://github.com/pypa/pip/issues/7555.\n",
"ERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.\n",
"multiprocess 0.70.12.2 requires dill>=0.3.4, but you have dill 0.3.1.1 which is incompatible.\n",
"google-colab 1.0.0 requires requests~=2.23.0, but you have requests 2.27.0 which is incompatible.\n",
"datascience 0.10.6 requires folium==0.2.1, but you have folium 0.8.3 which is incompatible.\n"
]
}
]
},
{
"cell_type": "markdown",
"source": [
"## Set object detection methods\n"
],
"metadata": {
"id": "rSMyQUh-VDh8"
}
},
{
"cell_type": "code",
"source": [
"import matplotlib\n",
"import matplotlib.pyplot as plt\n",
"\n",
"import os\n",
"import random\n",
"import io\n",
"import imageio\n",
"import glob\n",
"import scipy.misc\n",
"import numpy as np\n",
"from six import BytesIO\n",
"from PIL import Image, ImageDraw, ImageFont\n",
"from IPython.display import display, Javascript\n",
"from IPython.display import Image as IPyImage\n",
"\n",
"import tensorflow as tf\n",
"\n",
"from object_detection.utils import label_map_util\n",
"from object_detection.utils import config_util\n",
"from object_detection.utils import visualization_utils as viz_utils\n",
"from object_detection.utils import colab_utils\n",
"from object_detection.builders import model_builder\n",
"\n",
"%matplotlib inline"
],
"metadata": {
"id": "A8siEJqdVL_6"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"metadata": {
"id": "m3zGDuA75X6x"
},
"source": [
"# Object Detection From TF2 Saved Model\n",
"\n",
"This demo will take you through the steps of running an \"Android figurine\" TensorFlow 2 detection model on a collection of images. More specifically, this example was generated by `TFlite-model-maker` and exported with `Saved Model Format`.\n",
"\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "dX0HHkcS5X6z"
},
"source": [
"## Download images \n",
"\n",
"In this example, I used 2 Android figurine images that @khanhlvg uses in his Colab notebook."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "pRL6gLTF5X6z",
"outputId": "0e7eb49f-3a30-4b50-b96b-51d2398f858e"
},
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"Downloading data from http://download.tensorflow.org/example_images/android_figurine_sample.jpg\n",
"40960/38234 [================================] - 0s 0us/step\n",
"49152/38234 [======================================] - 0s 0us/step\n",
"Downloading data from http://download.tensorflow.org/example_images/android_figurine.jpg\n",
"1105920/1098205 [==============================] - 0s 0us/step\n",
"1114112/1098205 [==============================] - 0s 0us/step\n"
]
}
],
"source": [
"import os\n",
"os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging (1)\n",
"import pathlib\n",
"import tensorflow as tf\n",
"\n",
"tf.get_logger().setLevel('ERROR') # Suppress TensorFlow logging (2)\n",
"\n",
"# Enable GPU dynamic memory allocation\n",
"gpus = tf.config.experimental.list_physical_devices('GPU')\n",
"for gpu in gpus:\n",
" tf.config.experimental.set_memory_growth(gpu, True)\n",
"\n",
"def download_images():\n",
" base_url = 'http://download.tensorflow.org/example_images/'\n",
" filenames = ['android_figurine_sample.jpg', 'android_figurine.jpg']\n",
" image_paths = []\n",
" for filename in filenames:\n",
" image_path = tf.keras.utils.get_file(fname=filename,\n",
" origin=base_url + filename,\n",
" untar=False)\n",
" image_path = pathlib.Path(image_path)\n",
" image_paths.append(str(image_path))\n",
" return image_paths\n",
"\n",
"IMAGE_PATHS = download_images()"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "sytlUr3q5X60"
},
"source": [
"# Create model with Model-maker \n",
"\n",
"> This notebook comes from the [Model Maker Object Detection for Android Figurine](https://colab.research.google.com/github/khanhlvg/tflite_raspberry_pi/blob/main/object_detection/Train_custom_model_tutorial.ipynb#scrollTo=9ZsLQtJ1AlW_) of @khanhlvg.\n"
]
},
{
"cell_type": "code",
"source": [
"#@title Install the required packages\n",
"!pip install -q tflite-model-maker\n",
"!pip install -q tflite-support"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "ULF6Gku0YVYK",
"outputId": "bacfae22-5ea6-474a-ba0e-5eef35878939"
},
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"\u001b[K |████████████████████████████████| 616 kB 20.4 MB/s \n",
"\u001b[K |████████████████████████████████| 3.4 MB 57.9 MB/s \n",
"\u001b[K |████████████████████████████████| 77 kB 5.1 MB/s \n",
"\u001b[K |████████████████████████████████| 120 kB 62.1 MB/s \n",
"\u001b[K |████████████████████████████████| 840 kB 53.0 MB/s \n",
"\u001b[K |████████████████████████████████| 6.4 MB 48.6 MB/s \n",
"\u001b[K |████████████████████████████████| 1.1 MB 53.9 MB/s \n",
"\u001b[K |████████████████████████████████| 87 kB 7.7 MB/s \n",
"\u001b[K |████████████████████████████████| 25.3 MB 1.5 MB/s \n",
"\u001b[K |████████████████████████████████| 210 kB 72.5 MB/s \n",
"\u001b[?25h Building wheel for fire (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
"\u001b[31mERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.\n",
"object-detection 0.1 requires tf-models-official>=2.5.1, but you have tf-models-official 2.3.0 which is incompatible.\u001b[0m\n"
]
}
]
},
{
"cell_type": "code",
"source": [
"#@title Import the required packages.\n",
"import numpy as np\n",
"import os\n",
"\n",
"from tflite_model_maker.config import ExportFormat, QuantizationConfig\n",
"from tflite_model_maker import model_spec\n",
"from tflite_model_maker import object_detector\n",
"\n",
"from tflite_support import metadata\n",
"\n",
"import tensorflow as tf\n",
"assert tf.__version__.startswith('2')\n",
"\n",
"tf.get_logger().setLevel('ERROR')\n",
"from absl import logging\n",
"logging.set_verbosity(logging.ERROR)"
],
"metadata": {
"id": "5Ehr5PXZYfgd"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"#@title downloading the dataset\n",
"!wget https://storage.googleapis.com/download.tensorflow.org/data/android_figurine.zip\n",
"!unzip -q android_figurine.zip\n"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "nqJgpf79Yu2D",
"outputId": "50756fa9-776d-451a-a7f3-b4cab6bd65e6"
},
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"--2022-01-04 04:40:23-- https://storage.googleapis.com/download.tensorflow.org/data/android_figurine.zip\n",
"Resolving storage.googleapis.com (storage.googleapis.com)... 142.250.31.128, 142.251.111.128, 142.251.16.128, ...\n",
"Connecting to storage.googleapis.com (storage.googleapis.com)|142.250.31.128|:443... connected.\n",
"HTTP request sent, awaiting response... 200 OK\n",
"Length: 14333895 (14M) [application/zip]\n",
"Saving to: ‘android_figurine.zip’\n",
"\n",
"android_figurine.zi 100%[===================>] 13.67M 73.8MB/s in 0.2s \n",
"\n",
"2022-01-04 04:40:23 (73.8 MB/s) - ‘android_figurine.zip’ saved [14333895/14333895]\n",
"\n"
]
}
]
},
{
"cell_type": "code",
"source": [
"#@title Train the object detection model\n",
"#@markdown Step 1: Load the dataset\n",
"\n",
"train_data = object_detector.DataLoader.from_pascal_voc(\n",
" 'android_figurine/train',\n",
" 'android_figurine/train',\n",
" ['android', 'pig_android']\n",
")\n",
"\n",
"val_data = object_detector.DataLoader.from_pascal_voc(\n",
" 'android_figurine/validate',\n",
" 'android_figurine/validate',\n",
" ['android', 'pig_android']\n",
")\n"
],
"metadata": {
"id": "aJDMCeo6Y8A3"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"#@markdown Step 2: Select a model architecture\n",
"\n",
"spec = model_spec.get('efficientdet_lite0')\n"
],
"metadata": {
"id": "4Yftc3wrZInw"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"#@markdown Step 3: Train the TensorFlow model with the training data.\n",
"\n",
"model = object_detector.create(train_data, model_spec=spec, batch_size=4, train_whole_model=True, epochs=20, validation_data=val_data)"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "sKHSVM9IZWgP",
"outputId": "d915c984-de6e-4679-cbd3-1c437ec07fb5"
},
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"Epoch 1/20\n",
"15/15 [==============================] - 72s 2s/step - det_loss: 1.6625 - cls_loss: 1.0859 - box_loss: 0.0115 - reg_l2_loss: 0.0630 - loss: 1.7255 - learning_rate: 0.0065 - gradient_norm: 2.1645 - val_det_loss: 1.4981 - val_cls_loss: 1.0361 - val_box_loss: 0.0092 - val_reg_l2_loss: 0.0630 - val_loss: 1.5611\n",
"Epoch 2/20\n",
"15/15 [==============================] - 32s 2s/step - det_loss: 1.4214 - cls_loss: 0.9565 - box_loss: 0.0093 - reg_l2_loss: 0.0630 - loss: 1.4844 - learning_rate: 0.0049 - gradient_norm: 2.2217 - val_det_loss: 1.1747 - val_cls_loss: 0.7725 - val_box_loss: 0.0080 - val_reg_l2_loss: 0.0630 - val_loss: 1.2377\n",
"Epoch 3/20\n",
"15/15 [==============================] - 37s 3s/step - det_loss: 1.0357 - cls_loss: 0.6933 - box_loss: 0.0068 - reg_l2_loss: 0.0630 - loss: 1.0987 - learning_rate: 0.0048 - gradient_norm: 2.3619 - val_det_loss: 0.9099 - val_cls_loss: 0.5911 - val_box_loss: 0.0064 - val_reg_l2_loss: 0.0630 - val_loss: 0.9729\n",
"Epoch 4/20\n",
"15/15 [==============================] - 32s 2s/step - det_loss: 0.8590 - cls_loss: 0.5653 - box_loss: 0.0059 - reg_l2_loss: 0.0630 - loss: 0.9220 - learning_rate: 0.0046 - gradient_norm: 2.7841 - val_det_loss: 0.8002 - val_cls_loss: 0.5099 - val_box_loss: 0.0058 - val_reg_l2_loss: 0.0630 - val_loss: 0.8632\n",
"Epoch 5/20\n",
"15/15 [==============================] - 43s 3s/step - det_loss: 0.7599 - cls_loss: 0.4689 - box_loss: 0.0058 - reg_l2_loss: 0.0630 - loss: 0.8230 - learning_rate: 0.0043 - gradient_norm: 2.7318 - val_det_loss: 0.6555 - val_cls_loss: 0.4129 - val_box_loss: 0.0049 - val_reg_l2_loss: 0.0630 - val_loss: 0.7185\n",
"Epoch 6/20\n",
"15/15 [==============================] - 38s 3s/step - det_loss: 0.6445 - cls_loss: 0.4079 - box_loss: 0.0047 - reg_l2_loss: 0.0630 - loss: 0.7075 - learning_rate: 0.0040 - gradient_norm: 2.9677 - val_det_loss: 0.5936 - val_cls_loss: 0.3788 - val_box_loss: 0.0043 - val_reg_l2_loss: 0.0630 - val_loss: 0.6566\n",
"Epoch 7/20\n",
"15/15 [==============================] - 32s 2s/step - det_loss: 0.5586 - cls_loss: 0.3521 - box_loss: 0.0041 - reg_l2_loss: 0.0630 - loss: 0.6216 - learning_rate: 0.0037 - gradient_norm: 2.8052 - val_det_loss: 0.5450 - val_cls_loss: 0.3584 - val_box_loss: 0.0037 - val_reg_l2_loss: 0.0630 - val_loss: 0.6081\n",
"Epoch 8/20\n",
"15/15 [==============================] - 33s 2s/step - det_loss: 0.5463 - cls_loss: 0.3632 - box_loss: 0.0037 - reg_l2_loss: 0.0630 - loss: 0.6094 - learning_rate: 0.0033 - gradient_norm: 3.6594 - val_det_loss: 0.5208 - val_cls_loss: 0.3453 - val_box_loss: 0.0035 - val_reg_l2_loss: 0.0630 - val_loss: 0.5838\n",
"Epoch 9/20\n",
"15/15 [==============================] - 39s 3s/step - det_loss: 0.4618 - cls_loss: 0.2935 - box_loss: 0.0034 - reg_l2_loss: 0.0630 - loss: 0.5249 - learning_rate: 0.0029 - gradient_norm: 3.0050 - val_det_loss: 0.4686 - val_cls_loss: 0.3070 - val_box_loss: 0.0032 - val_reg_l2_loss: 0.0630 - val_loss: 0.5317\n",
"Epoch 10/20\n",
"15/15 [==============================] - 40s 3s/step - det_loss: 0.4616 - cls_loss: 0.2958 - box_loss: 0.0033 - reg_l2_loss: 0.0630 - loss: 0.5246 - learning_rate: 0.0025 - gradient_norm: 2.6188 - val_det_loss: 0.4542 - val_cls_loss: 0.3073 - val_box_loss: 0.0029 - val_reg_l2_loss: 0.0630 - val_loss: 0.5172\n",
"Epoch 11/20\n",
"15/15 [==============================] - 36s 2s/step - det_loss: 0.4015 - cls_loss: 0.2673 - box_loss: 0.0027 - reg_l2_loss: 0.0630 - loss: 0.4646 - learning_rate: 0.0021 - gradient_norm: 2.5937 - val_det_loss: 0.4039 - val_cls_loss: 0.2786 - val_box_loss: 0.0025 - val_reg_l2_loss: 0.0630 - val_loss: 0.4670\n",
"Epoch 12/20\n",
"15/15 [==============================] - 35s 2s/step - det_loss: 0.4026 - cls_loss: 0.2647 - box_loss: 0.0028 - reg_l2_loss: 0.0631 - loss: 0.4657 - learning_rate: 0.0017 - gradient_norm: 2.4295 - val_det_loss: 0.3910 - val_cls_loss: 0.2594 - val_box_loss: 0.0026 - val_reg_l2_loss: 0.0631 - val_loss: 0.4540\n",
"Epoch 13/20\n",
"15/15 [==============================] - 34s 2s/step - det_loss: 0.4019 - cls_loss: 0.2542 - box_loss: 0.0030 - reg_l2_loss: 0.0631 - loss: 0.4650 - learning_rate: 0.0013 - gradient_norm: 2.6056 - val_det_loss: 0.3534 - val_cls_loss: 0.2480 - val_box_loss: 0.0021 - val_reg_l2_loss: 0.0631 - val_loss: 0.4164\n",
"Epoch 14/20\n",
"15/15 [==============================] - 31s 2s/step - det_loss: 0.3663 - cls_loss: 0.2509 - box_loss: 0.0023 - reg_l2_loss: 0.0631 - loss: 0.4294 - learning_rate: 9.6772e-04 - gradient_norm: 2.5868 - val_det_loss: 0.3507 - val_cls_loss: 0.2319 - val_box_loss: 0.0024 - val_reg_l2_loss: 0.0631 - val_loss: 0.4137\n",
"Epoch 15/20\n",
"15/15 [==============================] - 43s 3s/step - det_loss: 0.3671 - cls_loss: 0.2369 - box_loss: 0.0026 - reg_l2_loss: 0.0631 - loss: 0.4301 - learning_rate: 6.6413e-04 - gradient_norm: 1.8747 - val_det_loss: 0.3293 - val_cls_loss: 0.2237 - val_box_loss: 0.0021 - val_reg_l2_loss: 0.0631 - val_loss: 0.3923\n",
"Epoch 16/20\n",
"15/15 [==============================] - 34s 2s/step - det_loss: 0.3719 - cls_loss: 0.2478 - box_loss: 0.0025 - reg_l2_loss: 0.0631 - loss: 0.4349 - learning_rate: 4.1061e-04 - gradient_norm: 2.3121 - val_det_loss: 0.3185 - val_cls_loss: 0.2195 - val_box_loss: 0.0020 - val_reg_l2_loss: 0.0631 - val_loss: 0.3815\n",
"Epoch 17/20\n",
"15/15 [==============================] - 33s 2s/step - det_loss: 0.3738 - cls_loss: 0.2416 - box_loss: 0.0026 - reg_l2_loss: 0.0631 - loss: 0.4369 - learning_rate: 2.1409e-04 - gradient_norm: 2.5710 - val_det_loss: 0.3213 - val_cls_loss: 0.2169 - val_box_loss: 0.0021 - val_reg_l2_loss: 0.0631 - val_loss: 0.3844\n",
"Epoch 18/20\n",
"15/15 [==============================] - 38s 3s/step - det_loss: 0.3335 - cls_loss: 0.2279 - box_loss: 0.0021 - reg_l2_loss: 0.0631 - loss: 0.3965 - learning_rate: 7.9920e-05 - gradient_norm: 2.3260 - val_det_loss: 0.3209 - val_cls_loss: 0.2145 - val_box_loss: 0.0021 - val_reg_l2_loss: 0.0631 - val_loss: 0.3839\n",
"Epoch 19/20\n",
"15/15 [==============================] - 33s 2s/step - det_loss: 0.3517 - cls_loss: 0.2451 - box_loss: 0.0021 - reg_l2_loss: 0.0631 - loss: 0.4148 - learning_rate: 1.1764e-05 - gradient_norm: 3.1505 - val_det_loss: 0.3199 - val_cls_loss: 0.2131 - val_box_loss: 0.0021 - val_reg_l2_loss: 0.0631 - val_loss: 0.3830\n",
"Epoch 20/20\n",
"15/15 [==============================] - 42s 3s/step - det_loss: 0.3602 - cls_loss: 0.2349 - box_loss: 0.0025 - reg_l2_loss: 0.0631 - loss: 0.4232 - learning_rate: 1.1480e-05 - gradient_norm: 2.1088 - val_det_loss: 0.3212 - val_cls_loss: 0.2131 - val_box_loss: 0.0022 - val_reg_l2_loss: 0.0631 - val_loss: 0.3843\n"
]
}
]
},
{
"cell_type": "code",
"source": [
"#@markdown Step 4. Evaluate the model with the validation data.\n",
"\n",
"model.evaluate(val_data)"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "m17zvqkXZk2r",
"outputId": "f073a473-ef3d-4257-b698-ad0be94bf51e"
},
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"\r1/1 [==============================] - 8s 8s/step\n",
"\n"
]
},
{
"output_type": "execute_result",
"data": {
"text/plain": [
"{'AP': 0.7415968,\n",
" 'AP50': 1.0,\n",
" 'AP75': 1.0,\n",
" 'AP_/android': 0.7218075,\n",
" 'AP_/pig_android': 0.76138616,\n",
" 'APl': 0.7415968,\n",
" 'APm': -1.0,\n",
" 'APs': -1.0,\n",
" 'ARl': 0.77569443,\n",
" 'ARm': -1.0,\n",
" 'ARmax1': 0.74583334,\n",
" 'ARmax10': 0.77569443,\n",
" 'ARmax100': 0.77569443,\n",
" 'ARs': -1.0}"
]
},
"metadata": {},
"execution_count": 13
}
]
},
{
"cell_type": "code",
"source": [
"# Step 5: Export the trained model as a TensorFlow SavedModel.\n",
"# ExportFormat.SAVED_MODEL writes a saved_model/ directory under\n",
"# export_dir (it does NOT produce a .tflite file).\n",
"model.export(export_dir=\"./js_export/\", export_format=[ExportFormat.SAVED_MODEL])\n"
],
"metadata": {
"id": "Z3XhSR0yZseq"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"metadata": {
"id": "yyuKbX6q5X61"
},
"source": [
"# Test the SAVED MODEL\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "zkInf8gu5X62",
"outputId": "8c59b86d-71ef-4eb8-eb58-16d226c813b0"
},
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"item {\n",
" name: \"android\",\n",
" id: 1\n",
"}\n",
"\n",
"item {\n",
" name: \"pig_android\",\n",
" id: 2\n",
"}\n",
"\n",
"\n"
]
}
],
"source": [
"# Generate label_map.pbtxt in the format expected by\n",
"# label_map_util.create_category_index_from_labelmap().\n",
"\n",
"obj = ['android', 'pig_android']\n",
"\n",
"# Build one `item { name, id }` entry per class. ids are 1-based:\n",
"# id 0 is reserved for the background class in the TF OD API.\n",
"pbtxt = \"\"\n",
"for i, name in enumerate(obj, start=1):\n",
"    pbtxt += \"item {\\n name: \\\"\"+name+\"\\\",\\n id: \"+str(i)+\"\\n}\\n\"+\"\\n\"\n",
"\n",
"with open(\"label_map.pbtxt\", \"w\", encoding=\"utf-8\") as f:\n",
"    f.write(pbtxt)\n",
"\n",
"print(pbtxt)\n",
"\n",
"PATH_TO_LABELS = './label_map.pbtxt'"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "p4Fq2mCX5X63",
"outputId": "a4cf56e8-d5d4-4bcc-8dfd-6666618f153f"
},
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"Loading model...Done! Took 22.472423315048218 seconds\n"
]
}
],
"source": [
"# Load the model\n",
"\n",
"import time\n",
"from object_detection.utils import label_map_util\n",
"from object_detection.utils import visualization_utils as viz_utils\n",
"\n",
"PATH_TO_SAVED_MODEL = \"./js_export/saved_model\"\n",
"\n",
"print('Loading model...', end='')\n",
"start_time = time.time()\n",
"\n",
"# Load saved model and build the detection function\n",
"detect_fn = tf.saved_model.load(PATH_TO_SAVED_MODEL)\n",
"\n",
"end_time = time.time()\n",
"elapsed_time = end_time - start_time\n",
"print('Done! Took {} seconds'.format(elapsed_time))"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "IlII0T575X63"
},
"source": [
"## Load label map data (for plotting)\n",
"Label maps match index numbers to category names, so that when our convolutional network\n",
"predicts `5`, we know that this corresponds to `airplane`. Here we use internal utility\n",
"functions, but anything that returns a dictionary mapping integers to appropriate string labels\n",
"would be fine.\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "2seNv1mn5X63"
},
"outputs": [],
"source": [
"category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS,\n",
" use_display_name=True)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "RBtfZ3aY5X64"
},
"source": [
"## Putting everything together\n",
"The code shown below loads an image, runs it through the detection model and visualizes the\n",
"detection results, including the keypoints.\n",
"\n",
"Note that this will take a long time (several minutes) the first time you run this code due to\n",
"tf.function's trace-compilation --- on subsequent runs (e.g. on new images), things will be\n",
"faster.\n",
"\n",
"Here are some simple things to try out if you are curious:\n",
"\n",
"* Modify some of the input images and see if detection still works. Some simple things to try out here (just uncomment the relevant portions of code) include flipping the image horizontally, or converting to grayscale (note that we still expect the input image to have 3 channels).\n",
"* Print out `detections['detection_boxes']` and try to match the box locations to the boxes in the image. Notice that coordinates are given in normalized form (i.e., in the interval [0, 1]).\n",
"* Set ``min_score_thresh`` to other values (between 0 and 1) to allow more detections in or to filter out more detections.\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 253
},
"id": "ticEgeTB5X64",
"outputId": "c0419d41-21ac-4f13-996c-f35a90402cad"
},
"outputs": [],
"source": [
"import numpy as np\n",
"from PIL import Image\n",
"import matplotlib.pyplot as plt\n",
"import warnings\n",
"warnings.filterwarnings('ignore')   # Suppress Matplotlib warnings\n",
"\n",
"def load_image_into_numpy_array(path):\n",
"  \"\"\"Load an image from file into a numpy array.\n",
"\n",
"  Puts image into numpy array to feed into tensorflow graph.\n",
"  Note that by convention we put it into a numpy array with shape\n",
"  (height, width, channels), where channels=3 for RGB.\n",
"\n",
"  Args:\n",
"    path: the file path to the image\n",
"\n",
"  Returns:\n",
"    uint8 numpy array with shape (img_height, img_width, 3)\n",
"  \"\"\"\n",
"  return np.array(Image.open(path))\n",
"\n",
"\n",
"for image_path in IMAGE_PATHS:\n",
"\n",
"  print('Running inference for {}... '.format(image_path), end='')\n",
"\n",
"  image_np = load_image_into_numpy_array(image_path)\n",
"\n",
"  # The model expects a batch of images as a tensor: convert and add a\n",
"  # batch axis with `tf.newaxis`.\n",
"  input_tensor = tf.convert_to_tensor(image_np)\n",
"  input_tensor = input_tensor[tf.newaxis, ...]\n",
"\n",
"  detections = detect_fn(input_tensor)\n",
"\n",
"  # TF Object Detection API SavedModels return a dict, but the SavedModel\n",
"  # exported by Model Maker's EfficientDet spec returns a plain tuple, so\n",
"  # `detections.pop(...)` raised AttributeError ('tuple' object has no\n",
"  # attribute 'pop'). Normalize a tuple result into the dict layout the\n",
"  # code below expects.\n",
"  # NOTE(review): assumed tuple order is (boxes, scores, classes,\n",
"  # num_detections) -- confirm against the model's serving signature.\n",
"  if isinstance(detections, (list, tuple)):\n",
"    boxes, scores, classes, counts = detections\n",
"    detections = {\n",
"        'detection_boxes': boxes,\n",
"        'detection_scores': scores,\n",
"        'detection_classes': classes,\n",
"        'num_detections': counts,\n",
"    }\n",
"\n",
"  # All outputs are batched tensors: take index [0] to drop the batch\n",
"  # dimension and keep only the first num_detections entries.\n",
"  num_detections = int(detections.pop('num_detections'))\n",
"  detections = {key: value[0, :num_detections].numpy()\n",
"                for key, value in detections.items()}\n",
"  detections['num_detections'] = num_detections\n",
"\n",
"  # detection_classes should be ints for the category_index lookup.\n",
"  detections['detection_classes'] = detections['detection_classes'].astype(np.int64)\n",
"\n",
"  image_np_with_detections = image_np.copy()\n",
"\n",
"  viz_utils.visualize_boxes_and_labels_on_image_array(\n",
"      image_np_with_detections,\n",
"      detections['detection_boxes'],\n",
"      detections['detection_classes'],\n",
"      detections['detection_scores'],\n",
"      category_index,\n",
"      use_normalized_coordinates=True,\n",
"      max_boxes_to_draw=200,\n",
"      min_score_thresh=.30,\n",
"      agnostic_mode=False)\n",
"\n",
"  plt.figure()\n",
"  plt.imshow(image_np_with_detections)\n",
"  print('Done')\n",
"plt.show()\n",
"\n",
"# sphinx_gallery_thumbnail_number = 2"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.5"
},
"colab": {
"name": "model maker generated saved_model test.ipynb",
"provenance": [],
"collapsed_sections": [],
"toc_visible": true,
"include_colab_link": true
}
},
"nbformat": 4,
"nbformat_minor": 0
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment