Last active
December 13, 2024 05:05
-
-
Save AmosLewis/dd31ab37517977b1c499d06495b4adc2 to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Configure and build torch-mlir in-tree against the bundled LLVM/MLIR
# (Debug build, clang/clang++ toolchain, MLIR Python bindings enabled).
# Fixed: removed scrape artifacts (" | |") that broke the line continuations,
# and replaced backticks with quoted "$(pwd)" so paths with spaces survive.
cmake -GNinja -Bbuild \
  -DCMAKE_BUILD_TYPE=Debug \
  -DCMAKE_C_COMPILER=clang \
  -DCMAKE_CXX_COMPILER=clang++ \
  -DPython3_FIND_VIRTUALENV=ONLY \
  -DLLVM_ENABLE_PROJECTS=mlir \
  -DLLVM_EXTERNAL_PROJECTS="torch-mlir;torch-mlir-dialects" \
  -DLLVM_EXTERNAL_TORCH_MLIR_SOURCE_DIR="$(pwd)" \
  -DLLVM_EXTERNAL_TORCH_MLIR_DIALECTS_SOURCE_DIR="$(pwd)/externals/llvm-external-projects/torch-mlir-dialects" \
  -DMLIR_ENABLE_BINDINGS_PYTHON=ON \
  -DLLVM_TARGETS_TO_BUILD=host \
  externals/llvm-project/llvm
# Build only the torch-mlir tools, not all of LLVM.
cmake --build build --target tools/torch-mlir/all
# Same in-tree torch-mlir build, but with the PyTorch C++ extensions enabled
# (-DTORCH_MLIR_ENABLE_PYTORCH_EXTENSIONS=ON).
# Fixed: removed scrape artifacts (" | |") that broke the line continuations,
# and replaced backticks with quoted "$(pwd)".
cmake -GNinja -Bbuild \
  -DCMAKE_BUILD_TYPE=Debug \
  -DCMAKE_C_COMPILER=clang \
  -DCMAKE_CXX_COMPILER=clang++ \
  -DPython3_FIND_VIRTUALENV=ONLY \
  -DLLVM_ENABLE_PROJECTS=mlir \
  -DLLVM_EXTERNAL_PROJECTS="torch-mlir;torch-mlir-dialects" \
  -DLLVM_EXTERNAL_TORCH_MLIR_SOURCE_DIR="$(pwd)" \
  -DLLVM_EXTERNAL_TORCH_MLIR_DIALECTS_SOURCE_DIR="$(pwd)/externals/llvm-external-projects/torch-mlir-dialects" \
  -DMLIR_ENABLE_BINDINGS_PYTHON=ON \
  -DTORCH_MLIR_ENABLE_PYTORCH_EXTENSIONS=ON \
  -DLLVM_TARGETS_TO_BUILD=host \
  externals/llvm-project/llvm
cmake --build build --target tools/torch-mlir/all
# Fetch/initialize all git submodules.
# Fixed: the command had been pasted twice onto one line
# ("--progressgit submodule update ..."), which would pass a bogus flag.
git submodule update --init --progress
# Stage all modifications to already-tracked files and fold them into the
# previous commit without editing its message.
git add -u
git commit --amend --no-edit
# Discard the most recent commit AND its working-tree changes (destructive).
git reset --hard HEAD~1
# Force-push the rewritten branch history (only safe on your own branch).
git push origin as_stride --force
# Format just the lines touched since HEAD~1 with clang-format.
pip3 install clang-format
git clang-format HEAD~1
# Lower torch dialect to TOSA, then auto-generate FileCheck CHECK lines.
torch-mlir-opt -convert-torch-to-tosa /tmp/index.mlir | externals/llvm-project/mlir/utils/generate-test-checks.py
# Other useful passes/pipelines to substitute for -convert-torch-to-tosa above.
# Fixed: these were bare lines that the shell would try to execute as commands.
#   --convert-torch-to-linalg
#   --torch-backend-to-linalg-on-tensors-backend-pipeline
# ONNX-imported IR -> torch -> linalg, with decomposition and cleanup passes.
torch-mlir-opt --convert-torch-onnx-to-torch --torch-decompose-complex-ops --cse --canonicalize --convert-torch-to-linalg reshape.default.onnx.mlir --debug
torch-mlir-opt --convert-torch-onnx-to-torch --torch-lower-to-backend-contract --torch-scalarize-shapes --torch-shape-refinement-pipeline --torch-backend-to-linalg-on-tensors-backend-pipeline onnx.mlir
# Dump the IR before and after every pass; disable threading so the dump
# order matches pass execution order.
torch-mlir-opt -convert-torch-to-tosa /tmp/index.mlir -mlir-print-ir-after-all -mlir-disable-threading --mlir-print-ir-before-all --debug
# Elide large constant tensors/resources so the printed IR stays readable.
torch-mlir-opt --mlir-elide-elementsattrs-if-larger=400 --mlir-elide-resource-strings-if-larger=400 model.mlir > model.elide.mlir
grep -r "AveragePool" Inception_v4_vaiq_int8.default.torch-onnx.mlir | |
# Compile the ONNX-imported model to an IREE VM flatbuffer, dumping each
# compilation phase for debugging (fixed: dropped " | |" scrape junk).
iree-compile --iree-vm-bytecode-module-output-format=flatbuffer-binary --dump-compilation-phases-to=./model-phases-rocm-Reshape_3/ /proj/gdba/shark/chi/src/SHARK-TestSuite/alt_e2eshark/test-run/mygpt4_trunc_Reshape_3/model.torch_onnx.mlir -o model_direct_Reshape_3.vmfb
# Execute the compiled module with execution tracing and statistics.
iree-run-module --trace_execution=true --print_statistics=true --module=compiled_model.vmfb --function=tf2onnx --input="1x4xsi32=1"
Upload a big zip file from the VM to Azure Storage:
az storage blob upload --account-name onnxstorage --container-name onnxstorage --name bugcases/torchtolinalgpipelineissue.zip --file torchtolinalgpipelineissue.zip --auth-mode key
# Install/upgrade the nightly torch-mlir wheel from the release index.
pip install \
--find-links https://github.com/llvm/torch-mlir-release/releases/expanded_assets/dev-wheels \
--upgrade \
torch-mlir
# Install/upgrade the IREE compiler and runtime wheels from the IREE
# pip release index.
pip install \
--find-links https://iree.dev/pip-release-links.html \
--upgrade \
iree-compiler \
iree-runtime
Run iree_tests:
# Run the ONNX operator test suite on CPU with 24 workers, a 30s per-test
# timeout, the slowest 20 durations reported, and a machine-readable log.
pytest SHARK-TestSuite/iree_tests/onnx/ \
-rpfE \
--numprocesses 24 \
--timeout=30 \
--durations=20 \
--no-skip-tests-missing-files \
--config-files=/proj/gdba/shark/chi/src/iree/build_tools/pkgci/external_test_suite/onnx_cpu_llvm_sync.json \
--report-log=/proj/gdba/shark/chi/src/iree_log.txt
Search all subdirectories for a string:
grep -R "torch.aten.tensor" *
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment
SHARK-TestSuite/e2eshark useful commands:
Run one ONNX model:
python ./run.py --torchmlirbuild ../../torch-mlir/build --tolerance 0.001 0.001 --cachedir ./huggingface_cache --runupto torch-mlir --torchtolinalg --ireebuild ../../iree-build --tests onnx/models/retinanet_resnet50_fpn_vaiq_int8
Run all ONNX models:
python ./run.py --torchmlirbuild ../../torch-mlir/build --tolerance 0.001 0.001 --cachedir ./huggingface_cache --runupto iree-compile --torchtolinalg --ireebuild ../../iree-build --report
Run one op:
python run.py -c ../../torch-mlir/build/ -i ../../iree-build/ -f onnx --tests onnx/operators/ReduceProdKeepdims0 --cachedir cachedir --report --runupto torch-mlir --torchtolinalg
Run all the PyTorch models:
python ./run.py --torchmlirbuild ../../torch-mlir/build --tolerance 0.001 0.001 --cachedir ./huggingface_cache --ireebuild ../../ iree-build -runupto iree-compile -f pytorch -g models --mode onnx
Run one PyTorch model:
python ./run.py --torchmlirbuild ../../torch-mlir/build --tolerance 0.001 0.001 --cachedir ./huggingface_cache --ireebuild ../../ iree-build -runupto iree-compile -f pytorch -g models --mode onnx --tests onnx/models/retinanet_resnet50_fpn_vaiq_int8