import torch

def nonzero(t):
    print("t: ", t)  # tensor([0, 0, 1, 1, 0, 0])
    # Flatten the input tensor to 1-D
    t_flat = t.flatten()  # equivalently torch.flatten(t, 0, 0) for 1-D input
    print(
        "t_flat: ", t_flat
    )  # tensor([0, 0, 1, 1, 0, 0]), torch.Size([6]), #!torch.vtensor<[?],si64>
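For context, one static-shape-friendly way to finish this decomposition is the cumsum-and-scatter compaction trick. The sketch below is illustrative only: the helper name `nonzero_1d` and the exact step order are assumptions, not the torch-mlir decomposition verbatim.

```python
import torch

def nonzero_1d(t):
    # Compact the indices of nonzero elements without data-dependent branching.
    t_flat = t.flatten()
    mask = (t_flat != 0).long()
    dest = torch.cumsum(mask, dim=0) - 1         # output slot for each nonzero
    n = t_flat.numel()
    src = torch.arange(n) * mask                 # zero elements contribute index 0
    out = torch.zeros(n, dtype=torch.long)
    out.scatter_add_(0, dest.clamp(min=0), src)  # colliding writes from zeros add 0
    num = int(mask.sum())                        # data-dependent nonzero count
    return out[:num].unsqueeze(1)                # match torch.nonzero's [N, 1] shape

print(nonzero_1d(torch.tensor([0, 0, 1, 1, 0, 0])))  # tensor([[2], [3]])
```

The final slice `out[:num]` is exactly where the dynamic `tensor<?xi64>` shape in the lowered IR comes from.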
iree-compile --iree-hal-target-backends=llvm-cpu model.linalg.mlir -o model.vmfb --dump-compilation-phases-to=./tmp/
failed to translate executables
failed to translate executables
model.linalg.mlir:21:10: error: 'memref.alloca' op expected no unbounded stack allocations
    %1 = tensor.empty(%dim) : tensor<?xi64>
         ^
model.linalg.mlir:10:3: note: called from
  func.func @main_graph(%arg0: tensor<?xi1>) -> tensor<1x1xi64> {
  ^
model.linalg.mlir:21:10: note: see current operation: %14 = "memref.alloca"(%11) <{alignment = 64 : i64, operandSegmentSizes = array<i32: 1, 0>}> : (index) -> memref<?xi64>
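The unbounded `memref.alloca` traces back to the data-dependent result size of nonzero: `tensor<?xi64>` is only sized at run time, so bufferization cannot prove a stack bound. A quick eager check (input values assumed for illustration) shows the dynamism:

```python
import torch

t = torch.tensor([0, 0, 1, 1, 0, 0])
print(torch.nonzero(t).shape)  # torch.Size([2, 1]): the size depends on the
                               # input values, hence the dynamic tensor<?xi64>
```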
Running MaskedFillTensorIntValueStaticModule_basic...
*** RUNNING TEST: MaskedScatterStaticBasic_basic ***
Compiling MaskedScatterStaticBasic_basic...
/proj/gdba/shark/chi/src/torch-mlir/mlir_venv/lib/python3.10/site-packages/torch/onnx/symbolic_opset10.py:513: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).
  return g.op("Constant", value_t=torch.tensor(list_or_value))
====================
ONNX RAW IR
module {
  func.func @main_graph(%arg0: !torch.vtensor<[4,4],f32>, %arg1: !torch.vtensor<[4,4],i1>, %arg2: !torch.vtensor<[8,8],f32>) -> !torch.vtensor<[4,4],f32> attributes {torch.onnx_meta.ir_version = 9 : si64, torch.onnx_meta.opset_version = 20 : si64, torch.onnx_meta.producer_name = "pytorch", torch.onnx_meta.producer_version = "2.6.0"} {
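As a reminder of the op under test: `masked_scatter` copies source values, read in row-major order, into the positions of `self` where the mask is true. The inputs below are made up to match the test's shapes:

```python
import torch

x = torch.zeros(4, 4)                                      # self: [4,4] f32
mask = torch.tensor([[True, False] * 2] * 4)               # mask: [4,4] i1 (values assumed)
src = torch.arange(64, dtype=torch.float32).reshape(8, 8)  # source: [8,8] f32
out = x.masked_scatter(mask, src)  # masked slots receive src.flatten()[0], [1], ... in order
print(out)
```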
./build_tools/ci/test_posix.sh
::group::Run ONNX e2e integration tests
TORCH_VERSION_FOR_COMPARISON = 2.6.0.dev20241107
Running tests sequentially with progress status
*** RUNNING TEST: AtenNonzero1DModule_one_nonzero ***
Compiling AtenNonzero1DModule_one_nonzero...
====================
ONNX RAW IR
module {
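The ONNX dump is truncated above, but the test name indicates a 1-D input with exactly one nonzero element; an assumed eager repro of what the module should compute:

```python
import torch

t = torch.tensor([0, 0, 1, 0, 0, 0])  # exactly one nonzero (values assumed)
print(torch.nonzero(t))               # tensor([[2]])
```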
module {
  func.func @tf2onnx(%arg0: !torch.vtensor<[?,768],f32>, %arg1: !torch.vtensor<[3],si64>, %arg2: !torch.vtensor<[?,256,768],f32>) -> !torch.vtensor<[?,256,768],f32> attributes {torch.onnx_meta.ir_version = 7 : si64, torch.onnx_meta.opset_version = 21 : si64, torch.onnx_meta.producer_name = "tf2onnx", torch.onnx_meta.producer_version = "1.5.2"} {
    %reshape = torch.operator "onnx.Reshape"(%arg0, %arg1) : (!torch.vtensor<[?,768],f32>, !torch.vtensor<[3],si64>) -> !torch.vtensor<[?,256,768],f32>
    %866 = torch.operator "onnx.Add"(%reshape, %arg2) : (!torch.vtensor<[?,256,768],f32>, !torch.vtensor<[?,256,768],f32>) -> !torch.vtensor<[?,256,768],f32>
    return %866 : !torch.vtensor<[?,256,768],f32>
  }
}
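For reference, an eager equivalent of this Reshape-then-Add graph. Concrete sizes are assumptions: the real `%arg1` is a runtime input, so the `[-1, 256, 768]` target shape and a batch of 2 are chosen here only to make the example runnable:

```python
import torch

x = torch.randn(512, 768)        # plays the role of %arg0: [?,768]
shape = [-1, 256, 768]           # plays the role of %arg1: [3] si64 (values assumed)
bias = torch.randn(2, 256, 768)  # plays the role of %arg2: [?,256,768]
out = x.reshape(shape) + bias    # onnx.Reshape followed by onnx.Add
print(out.shape)                 # torch.Size([2, 256, 768])
```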
<unknown>:0: error: failed to legalize unresolved materialization from ('i64') to 'index' that remained live after conversion
<unknown>:0: note: see current operation: %452 = "builtin.unrealized_conversion_cast"(%451) : (i64) -> index
/proj/gdba/shark/chi/src/SHARK-TestSuite/alt_e2eshark/test-run/model--long-t5-tglobal-base-16384-book-summary--pszemraj/model.modified.mlir:1299:12: note: see existing live user here: %2529 = hal.interface.binding.subspan layout(<constants = 20, bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%3) flags("ReadOnly|Indirect") : memref<i64, strided<[], offset: 24>>
%360 = linalg.generic {indexing_maps = [#map12, #map12, #map12, #map12], iterator_types = []} ins(%357, %358, %359 : tensor<i1>, tensor<i64>, tensor<i64>) outs(%94 : tensor<i64
module {
  func.func @main_graph(%arg5: !torch.vtensor<[2708],f32>, %arg1: !torch.vtensor<[?],si64>, %arg2: !torch.vtensor<[?],f32>) -> !torch.vtensor<[2708],f32> attributes {torch.onnx_meta.ir_version = 8 : si64, torch.onnx_meta.opset_version = 17 : si64, torch.onnx_meta.producer_name = "pytorch", torch.onnx_meta.producer_version = "2.1.0"} {
    %59 = torch.operator "onnx.ScatterElements"(%arg5, %arg1, %arg2) {torch.onnx.axis = 0 : si64, torch.onnx.reduction = "add"} : (!torch.vtensor<[2708],f32>, !torch.vtensor<[?],si64>, !torch.vtensor<[?],f32>) -> !torch.vtensor<[2708],f32>
    return %59 : !torch.vtensor<[2708],f32>
  }
}
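`onnx.ScatterElements` with `reduction = "add"` along axis 0 corresponds to PyTorch's `scatter_add_`. A small eager sketch of the same computation (index and update values assumed):

```python
import torch

data = torch.zeros(2708)             # %arg5: [2708] f32
idx = torch.tensor([5, 5, 100])      # %arg1: [?] si64 (values assumed)
upd = torch.tensor([1.0, 2.0, 3.0])  # %arg2: [?] f32 (values assumed)
out = data.clone()
out.scatter_add_(0, idx, upd)        # duplicate indices accumulate: out[5] == 3.0
print(out[5].item(), out[100].item())  # 3.0 3.0
```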
python ./run.py --tolerance 0.001 0.001 --cachedir /proj/gdba/shark/cache --ireebuild ../../iree-build -f onnx -g models --mode onnx --report -j 12 -r test-run-vision_int8 --testsfile list1_vision_int8_run
/proj/gdba/shark/chi/src/SHARK-TestSuite/e2eshark/e2e_venv/lib/python3.10/site-packages/torchvision/io/image.py:14: UserWarning: Failed to load image Python extension: '/proj/gdba/shark/chi/src/SHARK-TestSuite/e2eshark/e2e_venv/lib/python3.10/site-packages/torchvision/image.so: undefined symbol: _ZNK3c1011StorageImpl27throw_data_ptr_access_errorEv' If you don't plan on using image functionality from `torchvision.io`, you can ignore this warning. Otherwise, there might be something wrong with your environment. Did you have `libjpeg` or `libpng` installed before building `torchvision` from source?
  warn(
Starting e2eshark tests. Using 12 processes
Cache Directory: /proj/gdba/shark/cache
Tolerance for comparing floating point (atol, rtol) = (0.001, 0.001)
Note: No Torch MLIR build provided using --torchmlir
hal.executable public @torch_jit_dispatch_33 {
  hal.executable.variant public @embedded_elf_x86_64 target(<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>) {
    hal.executable.export public @torch_jit_dispatch_33_quantized_batch_matmul_56x56x512x128_i8xi8xi32xi32xi32 ordinal(0) layout(#hal.pipeline.layout<push_constants = 0, sets = [<0, bindings = [<0, storage_buffer, ReadOnly>, <1, storage_buffer, ReadOnly>, <2, storage_buffer>], flags = Indirect>]>) attributes {hal.interface.bindings = [#hal.interface.binding<0, 0>, #hal.interface.binding<0, 1>, #hal.interface.binding<0, 2>]} {
    ^bb0(%arg0: !hal.device):
      %x, %y, %z = flow.dispatch.workgroup_count_from_slice
      hal.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @torch_jit_dispatch_33_quantized_batch_matmul_5
hal.executable public @torch_jit_dispatch_25 {
  hal.executable.variant public @embedded_elf_x86_64 target(<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>) {
    hal.executable.export public @torch_jit_dispatch_25_quantized_batch_matmul_56x56x128x512_i8xi8xi32xi32xi32 ordinal(0) layout(#hal.pipeline.layout<push_constants = 0, sets = [<0, bindings = [<0, storage_buffer, ReadOnly>, <1, storage_buffer, ReadOnly>, <2, storage_buffer>], flags = Indirect>]>) attributes {hal.interface.bindings = [#hal.interface.binding<0, 0>, #hal.interface.binding<0, 1>, #hal.interface.binding<0, 2>]} {
    ^bb0(%arg0: !hal.device):
      %x, %y, %z = flow.dispatch.workgroup_count_from_slice
      hal.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @torch_jit_dispatch_25_quantized_batch_matmul_5
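For reference, a rough eager equivalent of the quantized batch matmul in `torch_jit_dispatch_25`. Reading the `56x56x128x512` suffix as BxMxNxK, and the i8-operand/i32-accumulator split, are assumptions from the dispatch name alone:

```python
import torch

a = torch.randint(-128, 128, (56, 56, 512), dtype=torch.int8)   # assumed B x M x K LHS
b = torch.randint(-128, 128, (56, 512, 128), dtype=torch.int8)  # assumed B x K x N RHS
acc = torch.matmul(a.to(torch.int32), b.to(torch.int32))        # accumulate in i32
print(acc.shape)  # torch.Size([56, 56, 128])
```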