onnx-runtime-python-inference.ipynb
{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"provenance": [],
"collapsed_sections": [],
"authorship_tag": "ABX9TyOw3/zBckQ53PPffjCgliTg",
"include_colab_link": true
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"language_info": {
"name": "python"
}
},
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "view-in-github",
"colab_type": "text"
},
"source": [
"<a href=\"https://colab.research.google.com/gist/josephrocca/91e876fd90e6b7c88429258ba2384a36/onnx-runtime-python-inference.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"cell_type": "code",
"source": [
"!wget https://huggingface.co/rocca/lyra-v2-soundstream/resolve/main/tflite/soundstream_encoder.tflite"
],
"metadata": {
"id": "uV6sZMz5vjEh"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"######################################\n",
"#     TEST ORIGINAL TFLITE MODEL     #\n",
"######################################"
],
"metadata": {
"id": "C3piPrKAxYwg"
},
"execution_count": 2,
"outputs": []
},
{
"cell_type": "code",
"source": [
"import tensorflow as tf\n",
"import numpy as np\n",
"\n",
"interpreter = tf.lite.Interpreter(model_path=\"soundstream_encoder.tflite\")\n",
"interpreter.allocate_tensors()\n",
"\n",
"# Get input and output tensors.\n",
"input_details = interpreter.get_input_details()\n",
"output_details = interpreter.get_output_details()\n",
"\n",
"# Test the model on random input data.\n", | |
"input_shape = input_details[0]['shape']\n", | |
"input_data = np.ones([1,320], dtype=np.float32)\n", | |
"interpreter.set_tensor(input_details[0]['index'], input_data)\n", | |
"\n", | |
"interpreter.invoke()\n", | |
"\n", | |
"# The function `get_tensor()` returns a copy of the tensor data.\n", | |
"# Use `tensor()` in order to get a pointer to the tensor.\n", | |
"output_data = interpreter.get_tensor(output_details[0]['index'])\n", | |
"print(output_data)" | |
], | |
"metadata": { | |
"colab": { | |
"base_uri": "https://localhost:8080/" | |
}, | |
"id": "6dhtRr-ru03Q", | |
"outputId": "c9496dcf-63af-45f2-bd0f-d6d81802920e" | |
}, | |
"execution_count": 3, | |
"outputs": [ | |
{ | |
"output_type": "stream", | |
"name": "stdout", | |
"text": [ | |
"[[[ 4.676804 5.2019243 30.969244 29.179945 -13.947937\n", | |
" -20.232111 -18.917313 -2.0216348 71.469536 1.2933018\n", | |
" -41.780964 -9.701668 14.158117 41.52357 14.740092\n", | |
" 3.990265 15.806018 26.886602 23.316065 -9.295914\n", | |
" -24.134699 0.12286076 7.6399045 -2.2618842 -16.264719\n", | |
" 2.8481846 -12.516875 7.998949 12.299546 -39.252556\n", | |
" 18.228468 -16.146786 2.0147903 11.509988 19.276041\n", | |
" 0.6773272 -4.82661 -8.449988 -5.65711 26.005175\n", | |
" -2.749786 -28.497498 -32.08775 0.2983079 37.036697\n", | |
" -28.817059 -6.4624305 13.872892 6.280514 7.645826\n", | |
" -5.385664 -12.087726 1.0219012 -6.038858 -1.9798441\n", | |
" -2.2230446 2.0583751 -9.412288 1.2139101 -9.248306\n", | |
" 5.490324 -7.4410644 5.5857916 5.004754 ]]]\n" | |
] | |
} | |
] | |
}, | |
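{
"cell_type": "markdown",
"metadata": {},
"source": [
"*Added illustrative cell (not part of the original gist):* a small sketch that prints the tensor metadata reported by the TFLite interpreter, which is a quick way to confirm the input shape and dtype the encoder actually expects. It assumes the `interpreter`, `input_details`, and `output_details` variables from the cell above are still in scope."
]
},
{
"cell_type": "code",
"source": [
"# Added sketch: inspect the interpreter's reported tensor metadata.\n",
"# Assumes `input_details` / `output_details` from the previous cell.\n",
"for d in input_details:\n",
"    print(\"input: \", d[\"name\"], d[\"shape\"], d[\"dtype\"])\n",
"for d in output_details:\n",
"    print(\"output:\", d[\"name\"], d[\"shape\"], d[\"dtype\"])"
],
"metadata": {},
"execution_count": null,
"outputs": []
},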
{
"cell_type": "code",
"source": [
"######################################\n",
"#    CONVERT TFLITE MODEL TO ONNX    #\n",
"######################################"
],
"metadata": {
"id": "BZ5ZWGu-xg8o"
},
"execution_count": 4,
"outputs": []
},
{
"cell_type": "code",
"source": [
"!pip install git+https://github.com/fatcat-z/tensorflow-onnx.git@remove_tflite_var_ops # A fix for tf2onnx provided by developer of @onnx/tensorflow-onnx - more info here https://github.com/onnx/tensorflow-onnx/issues/2059#issuecomment-1282726302" | |
],
"metadata": {
"id": "DHi0QvMkxQ4L"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"!python -m tf2onnx.convert --opset 17 --tflite soundstream_encoder.tflite --output soundstream_encoder.onnx --verbose --debug"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "2PPVYstNxVH2",
"outputId": "ece0ba34-b592-47b6-e2a2-0887a77dc8bd"
},
"execution_count": 6,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"/usr/lib/python3.7/runpy.py:125: RuntimeWarning: 'tf2onnx.convert' found in sys.modules after import of package 'tf2onnx', but prior to execution of 'tf2onnx.convert'; this may result in unpredictable behaviour\n",
" warn(RuntimeWarning(msg))\n",
"2022-10-30 16:49:29,915 - INFO - tf2onnx: inputs: None\n",
"2022-10-30 16:49:29,916 - INFO - tf2onnx: outputs: None\n",
"2022-10-30 16:49:30,041 - INFO - tf2onnx.tfonnx: Using tensorflow=2.9.2, onnx=1.12.0, tf2onnx=1.12.0/5f209d\n",
"2022-10-30 16:49:30,041 - INFO - tf2onnx.tfonnx: Using opset <onnx, 17>\n",
"INFO: Created TensorFlow Lite XNNPACK delegate for CPU.\n",
"====== removing streamable_model_12/first_layerconv/concat/ReadVariableOp\n",
"====== removing streamable_model_12/first_layerconv/conv1d_36/BiasAdd/ReadVariableOp\n",
"====== removing streamable_model_12/encoder_0/resnet_0aconv/concat/ReadVariableOp\n",
"====== removing streamable_model_12/encoder_0/resnet_0aconv/separable_conv1d_36/BiasAdd/ReadVariableOp\n",
"====== removing streamable_model_12/encoder_0/resnet_0bconv/BiasAdd/ReadVariableOp\n",
"====== removing streamable_model_12/encoder_0/resnet_1adepthwise_conv/concat/ReadVariableOp\n",
"====== removing streamable_model_12/encoder_0/resnet_1apointwise_conv/BiasAdd/ReadVariableOp\n",
"====== removing streamable_model_12/encoder_0/resnet_1bconv/BiasAdd/ReadVariableOp\n",
"====== removing streamable_model_12/encoder_0/resnet_2adepthwise_conv/concat/ReadVariableOp\n",
"====== removing streamable_model_12/encoder_0/resnet_2apointwise_conv/BiasAdd/ReadVariableOp\n",
"====== removing streamable_model_12/encoder_0/resnet_2bconv/BiasAdd/ReadVariableOp\n",
"====== removing streamable_model_12/encoder_0/simpleconv/concat/ReadVariableOp\n",
"====== removing streamable_model_12/encoder_0/simpleconv/conv1d_37/BiasAdd/ReadVariableOp\n",
"====== removing streamable_model_12/encoder_1/resnet_0aconv/concat/ReadVariableOp\n",
"====== removing streamable_model_12/encoder_1/resnet_0aconv/separable_conv1d_37/BiasAdd/ReadVariableOp\n",
"====== removing streamable_model_12/encoder_1/resnet_0bconv/BiasAdd/ReadVariableOp\n",
"====== removing streamable_model_12/encoder_1/resnet_1adepthwise_conv/concat/ReadVariableOp\n",
"====== removing streamable_model_12/encoder_1/resnet_1apointwise_conv/BiasAdd/ReadVariableOp\n",
"====== removing streamable_model_12/encoder_1/resnet_1bconv/BiasAdd/ReadVariableOp\n",
"====== removing streamable_model_12/encoder_1/resnet_2adepthwise_conv/concat/ReadVariableOp\n",
"====== removing streamable_model_12/encoder_1/resnet_2apointwise_conv/BiasAdd/ReadVariableOp\n",
"====== removing streamable_model_12/encoder_1/resnet_2bconv/BiasAdd/ReadVariableOp\n",
"====== removing streamable_model_12/encoder_1/simpleconv/concat/ReadVariableOp\n",
"====== removing streamable_model_12/encoder_1/simpleconv/conv1d_38/BiasAdd/ReadVariableOp\n",
"====== removing streamable_model_12/encoder_2/resnet_0aconv/concat/ReadVariableOp\n",
"====== removing streamable_model_12/encoder_2/resnet_0aconv/separable_conv1d_38/BiasAdd/ReadVariableOp\n",
"====== removing streamable_model_12/encoder_2/resnet_0bconv/BiasAdd/ReadVariableOp\n",
"====== removing streamable_model_12/encoder_2/resnet_1adepthwise_conv/concat/ReadVariableOp\n",
"====== removing streamable_model_12/encoder_2/resnet_1apointwise_conv/BiasAdd/ReadVariableOp\n",
"====== removing streamable_model_12/encoder_2/resnet_1bconv/BiasAdd/ReadVariableOp\n",
"====== removing streamable_model_12/encoder_2/resnet_2adepthwise_conv/concat/ReadVariableOp\n",
"====== removing streamable_model_12/encoder_2/resnet_2apointwise_conv/BiasAdd/ReadVariableOp\n",
"====== removing streamable_model_12/encoder_2/resnet_2bconv/BiasAdd/ReadVariableOp\n",
"====== removing streamable_model_12/encoder_2/simpleconv/concat/ReadVariableOp\n",
"====== removing streamable_model_12/encoder_2/simpleconv/conv1d_39/BiasAdd/ReadVariableOp\n",
"====== removing streamable_model_12/bottleneck_1/simpleconv/concat/ReadVariableOp\n",
"====== removing streamable_model_12/bottleneck_1/simpleconv/conv1d_40/BiasAdd/ReadVariableOp\n",
"2022-10-30 16:49:30,171 - VERBOSE - tf2onnx.tfonnx: Mapping TF node to ONNX node(s)\n",
"2022-10-30 16:49:30,174 - VERBOSE - tf2onnx.tfonnx: Mapping TF node to ONNX node(s)\n",
"2022-10-30 16:49:30,175 - VERBOSE - tf2onnx.tfonnx: Summay Stats:\n",
"\ttensorflow ops: Counter({'Const': 12})\n",
"\ttensorflow attr: Counter({'value': 12})\n",
"\tonnx mapped: Counter({'Const': 12})\n",
"\tonnx unmapped: Counter()\n",
"2022-10-30 16:49:30,180 - VERBOSE - tf2onnx.tfonnx: Mapping TF node to ONNX node(s)\n",
"2022-10-30 16:49:30,272 - VERBOSE - tf2onnx.tfonnx: Mapping TF node to ONNX node(s)\n",
"2022-10-30 16:49:30,342 - VERBOSE - tf2onnx.tfonnx: Summay Stats:\n",
"\ttensorflow ops: Counter({'Const': 86, 'TFL_RESHAPE': 47, 'TFL_CONV_2D': 23, 'TFL_LEAKY_RELU': 22, 'TFL_CONCATENATION': 14, 'TFL_STRIDED_SLICE': 14, 'TFL_DEPTHWISE_CONV_2D': 9, 'TFL_ADD': 9, 'Placeholder': 1, 'Identity': 1})\n",
"\ttensorflow attr: Counter({'value': 86, 'fused_activation_function': 55, 'dilation_h_factor': 32, 'dilation_w_factor': 32, 'padding': 32, 'stride_h': 32, 'stride_w': 32, 'alpha': 22, 'axis': 14, 'begin_mask': 14, 'ellipsis_mask': 14, 'end_mask': 14, 'new_axis_mask': 14, 'shrink_axis_mask': 14, 'depth_multiplier': 9, 'pot_scale_int16': 9})\n",
"\tonnx mapped: Counter({'Const': 56, 'Reshape': 47, 'Conv2D': 23, 'LeakyRelu': 22, 'TFL_CONCATENATION': 14, 'DepthwiseConv2dNative': 9, 'Add': 9, 'Placeholder': 1})\n",
"\tonnx unmapped: Counter()\n",
"2022-10-30 16:49:30,342 - INFO - tf2onnx.optimizer: Optimizing ONNX model\n",
"2022-10-30 16:49:30,343 - VERBOSE - tf2onnx.optimizer: Apply optimize_transpose\n",
"2022-10-30 16:49:30,426 - VERBOSE - tf2onnx.optimizer.TransposeOptimizer: Const +12 (65->77), Reshape +12 (56->68), Transpose -30 (128->98)\n",
"2022-10-30 16:49:30,427 - VERBOSE - tf2onnx.optimizer: Apply remove_redundant_upsample\n",
"2022-10-30 16:49:30,454 - VERBOSE - tf2onnx.optimizer.UpsampleOptimizer: no change\n",
"2022-10-30 16:49:30,454 - VERBOSE - tf2onnx.optimizer: Apply fold_constants\n",
"2022-10-30 16:49:30,530 - VERBOSE - tf2onnx.optimizer.ConstFoldOptimizer: Cast -47 (47->0), Const +8 (77->85), Reshape -18 (68->50), Transpose -55 (98->43)\n",
"2022-10-30 16:49:30,530 - VERBOSE - tf2onnx.optimizer: Apply const_dequantize_optimizer\n",
"2022-10-30 16:49:30,566 - VERBOSE - tf2onnx.optimizer.ConstDequantizeOptimizer: no change\n",
"2022-10-30 16:49:30,566 - VERBOSE - tf2onnx.optimizer: Apply loop_optimizer\n",
"2022-10-30 16:49:30,589 - VERBOSE - tf2onnx.optimizer.LoopOptimizer: no change\n",
"2022-10-30 16:49:30,590 - VERBOSE - tf2onnx.optimizer: Apply merge_duplication\n",
"2022-10-30 16:49:30,626 - VERBOSE - tf2onnx.optimizer.MergeDuplicatedNodesOptimizer: Const -26 (85->59)\n",
"2022-10-30 16:49:30,626 - VERBOSE - tf2onnx.optimizer: Apply reshape_optimizer\n",
"2022-10-30 16:49:30,645 - VERBOSE - tf2onnx.optimizer.ReshapeOptimizer: no change\n",
"2022-10-30 16:49:30,646 - VERBOSE - tf2onnx.optimizer: Apply global_pool_optimizer\n",
"2022-10-30 16:49:30,666 - VERBOSE - tf2onnx.optimizer.GlobalPoolOptimizer: no change\n",
"2022-10-30 16:49:30,666 - VERBOSE - tf2onnx.optimizer: Apply q_dq_optimizer\n",
"2022-10-30 16:49:30,685 - VERBOSE - tf2onnx.optimizer.QDQOptimizer: no change\n",
"2022-10-30 16:49:30,685 - VERBOSE - tf2onnx.optimizer: Apply remove_identity\n",
"2022-10-30 16:49:30,707 - VERBOSE - tf2onnx.optimizer.IdentityOptimizer: Identity -1 (1->0)\n",
"2022-10-30 16:49:30,707 - VERBOSE - tf2onnx.optimizer: Apply remove_back_to_back\n",
"2022-10-30 16:49:30,727 - VERBOSE - tf2onnx.optimizer.BackToBackOptimizer: Const -3 (59->56), Reshape -3 (50->47)\n",
"2022-10-30 16:49:30,727 - VERBOSE - tf2onnx.optimizer: Apply einsum_optimizer\n",
"2022-10-30 16:49:30,745 - VERBOSE - tf2onnx.optimizer.EinsumOptimizer: no change\n",
"2022-10-30 16:49:30,745 - VERBOSE - tf2onnx.optimizer: Apply optimize_transpose\n",
"2022-10-30 16:49:30,768 - VERBOSE - tf2onnx.optimizer.TransposeOptimizer: no change\n",
"2022-10-30 16:49:30,768 - VERBOSE - tf2onnx.optimizer: Apply remove_redundant_upsample\n",
"2022-10-30 16:49:30,789 - VERBOSE - tf2onnx.optimizer.UpsampleOptimizer: no change\n",
"2022-10-30 16:49:30,789 - VERBOSE - tf2onnx.optimizer: Apply fold_constants\n",
"2022-10-30 16:49:30,808 - VERBOSE - tf2onnx.optimizer.ConstFoldOptimizer: no change\n",
"2022-10-30 16:49:30,808 - VERBOSE - tf2onnx.optimizer: Apply const_dequantize_optimizer\n",
"2022-10-30 16:49:30,832 - VERBOSE - tf2onnx.optimizer.ConstDequantizeOptimizer: no change\n",
"2022-10-30 16:49:30,833 - VERBOSE - tf2onnx.optimizer: Apply loop_optimizer\n",
"2022-10-30 16:49:30,853 - VERBOSE - tf2onnx.optimizer.LoopOptimizer: no change\n",
"2022-10-30 16:49:30,853 - VERBOSE - tf2onnx.optimizer: Apply merge_duplication\n",
"2022-10-30 16:49:30,878 - VERBOSE - tf2onnx.optimizer.MergeDuplicatedNodesOptimizer: no change\n",
"2022-10-30 16:49:30,879 - VERBOSE - tf2onnx.optimizer: Apply reshape_optimizer\n",
"2022-10-30 16:49:30,896 - VERBOSE - tf2onnx.optimizer.ReshapeOptimizer: no change\n",
"2022-10-30 16:49:30,896 - VERBOSE - tf2onnx.optimizer: Apply global_pool_optimizer\n",
"2022-10-30 16:49:30,916 - VERBOSE - tf2onnx.optimizer.GlobalPoolOptimizer: no change\n",
"2022-10-30 16:49:30,917 - VERBOSE - tf2onnx.optimizer: Apply q_dq_optimizer\n",
"2022-10-30 16:49:30,936 - VERBOSE - tf2onnx.optimizer.QDQOptimizer: no change\n",
"2022-10-30 16:49:30,936 - VERBOSE - tf2onnx.optimizer: Apply remove_identity\n",
"2022-10-30 16:49:30,957 - VERBOSE - tf2onnx.optimizer.IdentityOptimizer: no change\n",
"2022-10-30 16:49:30,957 - VERBOSE - tf2onnx.optimizer: Apply remove_back_to_back\n",
"2022-10-30 16:49:30,975 - VERBOSE - tf2onnx.optimizer.BackToBackOptimizer: no change\n",
"2022-10-30 16:49:30,975 - VERBOSE - tf2onnx.optimizer: Apply einsum_optimizer\n",
"2022-10-30 16:49:30,999 - VERBOSE - tf2onnx.optimizer.EinsumOptimizer: no change\n",
"2022-10-30 16:49:31,004 - INFO - tf2onnx.optimizer: After optimization: Cast -47 (47->0), Const -9 (65->56), Identity -1 (1->0), Reshape -9 (56->47), Transpose -85 (128->43)\n",
"2022-10-30 16:49:31,036 - INFO - tf2onnx: \n",
"2022-10-30 16:49:31,036 - INFO - tf2onnx: Successfully converted TensorFlow model soundstream_encoder.tflite to ONNX\n",
"2022-10-30 16:49:31,036 - INFO - tf2onnx: Model inputs: ['serving_default_input_audio:0']\n",
"2022-10-30 16:49:31,037 - INFO - tf2onnx: Model outputs: ['StatefulPartitionedCall:0']\n",
"2022-10-30 16:49:31,037 - INFO - tf2onnx: ONNX model is saved at soundstream_encoder.onnx\n"
]
}
]
},
{
"cell_type": "code",
"source": [
"######################################\n",
"#       INFERENCE THE ONNX MODEL     #\n",
"######################################"
],
"metadata": {
"id": "YUMiu4QdxkBa"
},
"execution_count": 8,
"outputs": []
},
{
"cell_type": "code",
"source": [
"!pip install onnxruntime onnx"
],
"metadata": {
"id": "kd3xsyiCzWs0"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"import onnx\n",
"onnx_model = onnx.load(\"soundstream_encoder.onnx\")\n",
"onnx.checker.check_model(onnx_model)"
],
"metadata": {
"id": "PVf5NukYtp_H"
},
"execution_count": 9,
"outputs": []
},
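{
"cell_type": "markdown",
"metadata": {},
"source": [
"*Added illustrative cell (not part of the original gist):* a small sketch that lists the converted model's declared graph inputs and outputs (names and dimensions), so the feed name and shape used in the next cell can be cross-checked. It assumes the `onnx_model` variable loaded in the cell above."
]
},
{
"cell_type": "code",
"source": [
"# Added sketch: print the ONNX graph's declared input/output names and dims.\n",
"# Assumes `onnx_model` from the previous cell.\n",
"def dims_of(value_info):\n",
"    return [d.dim_param or d.dim_value for d in value_info.type.tensor_type.shape.dim]\n",
"\n",
"for vi in onnx_model.graph.input:\n",
"    print(\"input: \", vi.name, dims_of(vi))\n",
"for vi in onnx_model.graph.output:\n",
"    print(\"output:\", vi.name, dims_of(vi))"
],
"metadata": {},
"execution_count": null,
"outputs": []
},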
{
"cell_type": "code",
"source": [
"import onnxruntime as ort\n",
"import numpy as np\n",
"\n",
"ort_sess = ort.InferenceSession(\"soundstream_encoder.onnx\")\n",
"outputs = ort_sess.run(None, {\"serving_default_input_audio:0\": np.ones([1,320], dtype=np.float32)})\n",
"\n",
"print(f'Outputs: \"{outputs}\"')"
],
"metadata": {
"id": "eRIS9dSEuOw1",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 441
},
"outputId": "cf43c118-fcb6-4981-8fd1-e62f3eba9a30"
},
"execution_count": 10,
"outputs": [
{
"output_type": "error",
"ename": "RuntimeException",
"evalue": "ignored",
"traceback": [ | |
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", | |
"\u001b[0;31mRuntimeException\u001b[0m Traceback (most recent call last)", | |
"\u001b[0;32m<ipython-input-10-8de59f8efefc>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m 3\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[0mort_sess\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mort\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mInferenceSession\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"soundstream_encoder.onnx\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 5\u001b[0;31m \u001b[0moutputs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mort_sess\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m{\u001b[0m\u001b[0;34m\"serving_default_input_audio:0\"\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mones\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;36m320\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdtype\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfloat32\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m}\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 6\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 7\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34mf'Outputs: \"{outputs}\"'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", | |
"\u001b[0;32m/usr/local/lib/python3.7/dist-packages/onnxruntime/capi/onnxruntime_inference_collection.py\u001b[0m in \u001b[0;36mrun\u001b[0;34m(self, output_names, input_feed, run_options)\u001b[0m\n\u001b[1;32m 198\u001b[0m \u001b[0moutput_names\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0moutput\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mname\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0moutput\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_outputs_meta\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 199\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 200\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_sess\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0moutput_names\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0minput_feed\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mrun_options\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 201\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mC\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mEPFail\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0merr\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 202\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_enable_fallback\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", | |
"\u001b[0;31mRuntimeException\u001b[0m: [ONNXRuntimeError] : 6 : RUNTIME_EXCEPTION : Non-zero status code returned while running Reshape node. Name:'streamable_model_12/first_layerconv/conv1d_36/BiasAdd;streamable_model_12/first_layerconv/conv1d_36/Conv1D/Squeeze;streamable_model_12/first_layerconv/conv1d_36/BiasAdd/ReadVariableOp;Conv1D;streamable_model_12/first_layerconv/conv1d_36/Conv1D__39' Status Message: /onnxruntime_src/onnxruntime/core/providers/cpu/tensor/reshape_helper.h:41 onnxruntime::ReshapeHelper::ReshapeHelper(const onnxruntime::TensorShape&, onnxruntime::TensorShapeVector&, bool) gsl::narrow_cast<int64_t>(input_shape.Size()) == size was false. The input tensor cannot be reshaped to the requested shape. Input shape:{1,320,1}, requested shape:{1,1,1,368}\n" | |
] | |
}
]
},
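{
"cell_type": "markdown",
"metadata": {},
"source": [
"*Added illustrative cell (not part of the original gist):* the run above fails inside a Reshape node with `Input shape:{1,320,1}, requested shape:{1,1,1,368}`. As a debugging aid, the hedged sketch below simply asks the ONNX Runtime session for the input/output names, shapes, and types it expects; it does not attempt to fix the conversion issue."
]
},
{
"cell_type": "code",
"source": [
"# Added sketch: query the session for its expected input/output metadata.\n",
"# Assumes `ort_sess` from the failing cell above was created successfully.\n",
"for i in ort_sess.get_inputs():\n",
"    print(\"input: \", i.name, i.shape, i.type)\n",
"for o in ort_sess.get_outputs():\n",
"    print(\"output:\", o.name, o.shape, o.type)"
],
"metadata": {},
"execution_count": null,
"outputs": []
}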
]
} |