Prashant Kumar pashu123
<unknown>:0: error: invalid size -9223372036854775808 for !torch.tensor type
iree-compile: iree/third_party/llvm-project/mlir/include/mlir/IR/StorageUniquerSupport.h:180: static ConcreteT mlir::detail::StorageUserBase<mlir::torch::Torch::ValueTensorType, mlir::torch::Torch::BaseTensorType, mlir::torch::Torch::detail::ValueTensorTypeStorage, mlir::detail::TypeUniquer>::get(MLIRContext *, Args &&...) [ConcreteT = mlir::torch::Torch::ValueTensorType, BaseT = mlir::torch::Torch::BaseTensorType, StorageT = mlir::torch::Torch::detail::ValueTensorTypeStorage, UniquerT = mlir::detail::TypeUniquer, Traits = <>, Args = <std::optional<llvm::ArrayRef<long>> &, mlir::Type &, mlir::Attribute &>]: Assertion `succeeded( ConcreteT::verifyInvariants(getDefaultDiagnosticEmitFn(ctx), args...))' failed.
Please report issues to https://github.com/iree-org/iree/issues and include the crash backtrace.
Stack dump:
0. Program arguments: iree-compile --iree-hal-target-backends=llvm-cpu new_onnx.mlir -o abc.vmfb --iree-llvmcpu-target-cp
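For context (commentary, not part of the crash log): -9223372036854775808 is INT64_MIN, the sentinel MLIR uses for ShapedType::kDynamic. Its appearance in a !torch.tensor size suggests the dynamic-dimension sentinel leaked into the torch type unconverted; in well-formed torch-mlir IR an unknown dimension prints as '?', as in the hypothetical snippet below (illustrative only, not taken from new_onnx.mlir):

// Illustrative sketch, not from the crashing input: unknown dims in a
// valid !torch.vtensor print as `?` rather than a raw INT64_MIN size.
func.func @dynamic_dims_ok(%arg0: !torch.vtensor<[?,256,88,88],f32>) -> !torch.vtensor<[?,256,88,88],f32> {
  return %arg0 : !torch.vtensor<[?,256,88,88],f32>
}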
#pipeline_layout = #hal.pipeline.layout<constants = 4, bindings = [
#hal.pipeline.binding<storage_buffer>
]>
hal.executable.source public @executable {
hal.executable.export public @write_constants ordinal(0) layout(#pipeline_layout) attributes {workgroup_size = [1 : index, 1 : index, 1 : index]} {
^bb0(%arg0: !hal.device):
%c1 = arith.constant 1 : index
hal.return %c1, %c1, %c1 : index, index, index
}
// -----// IR Dump After ConvertTorchOnnxToTorch (convert-torch-onnx-to-torch) //----- //
func.func @torch_jit(%arg0: !torch.vtensor<[1,64,88,88],f32>) -> !torch.vtensor<[?,256,88,88],f32> attributes {torch.onnx_meta.ir_version = 7 : si64, torch.onnx_meta.opset_version = 21 : si64, torch.onnx_meta.producer_name = "pytorch", torch.onnx_meta.producer_version = "1.12.1"} {
%0 = torch.vtensor.literal(dense<[[[-0.162511453, 0.196854442, -0.89627254, 0.699266493, 0.930536746]]]> : tensor<1x1x5xf32>) : !torch.vtensor<[1,1,5],f32>
%1 = torch.vtensor.literal(dense<0.000000e+00> : tensor<256x64x1x1xf32>) : !torch.vtensor<[256,64,1,1],f32>
%2 = torch.vtensor.literal(dense<"0x4684C23E7C2E86BF686622BF042F8D3F2FF0803E7CC5683F8810C9BFB750363FCE7E0DC07BCD63C0878B86BF633820BF449DBA3F88D3B0BE83C9FA3FF5A6ECBF078D44401E80D2BE524F17C01BB8823DE069453DA60F85404569F3BD599F823F0DFAA1C0CD896EBF3C98434084EF7C408067A63E4088613EAE5AA93E22BF5D3F50D6083E75EE51BF00E69BBF478287403E5C6E3FB6B11340D392C4BE4C439B3E3A22B13FCC3B4F3F34F554BD6
This file has been truncated.
// -----// IR Dump After ConvertTorchOnnxToTorch (convert-torch-onnx-to-torch) //----- //
func.func @main_graph(%arg0: !torch.vtensor<[?,?],si64>, %arg1: !torch.vtensor<[?,?],si64>, %arg2: !torch.vtensor<[?,?],si64>, %arg3: !torch.vtensor<[?,?,?],si64>, %arg4: !torch.vtensor<[4],si64>, %arg5: !torch.vtensor<[2],si64>) -> !torch.vtensor<[?,?,128,384],i1> attributes {torch.onnx_meta.ir_version = 7 : si64, torch.onnx_meta.opset_version = 21 : si64, torch.onnx_meta.producer_name = "pytorch", torch.onnx_meta.producer_version = "2.4.0"} {
%0 = torch.vtensor.literal(dense<0> : tensor<si64>) : !torch.vtensor<[],si64>
%int0 = torch.constant.int 0
%int0_0 = torch.constant.int 0
%1 = torch.aten.select.int %arg4, %int0, %int0_0 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[],si64>
%2 = torch.aten.item %1 : !torch.vtensor<[],si64> -> !torch.int
%int1 = torch.constant.int 1
%3 = torch.aten.select.int %arg4, %int0, %int1 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtens
module {
func.func @main_graph(%arg0: !torch.vtensor<[?,?],si64>, %arg1: !torch.vtensor<[?,?],si64>, %arg2: !torch.vtensor<[?,?],si64>, %arg3: !torch.vtensor<[?,?,?],si64>, %arg4: !torch.vtensor<[4],si64>, %arg5: !torch.vtensor<[2],si64>) -> !torch.vtensor<[?,?,128,384],i1> attributes {torch.onnx_meta.ir_version = 7 : si64, torch.onnx_meta.opset_version = 21 : si64, torch.onnx_meta.producer_name = "pytorch", torch.onnx_meta.producer_version = "2.4.0"} {
%1 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<0> : tensor<si64>} : () -> !torch.vtensor<[],si64>
%2 = torch.operator "onnx.Pad"(%arg1, %arg4, %1) {torch.onnx.mode = "constant"} : (!torch.vtensor<[?,?],si64>, !torch.vtensor<[4],si64>, !torch.vtensor<[],si64>) -> !torch.vtensor<[?,?],si64>
%3 = torch.operator "onnx.Constant"() {torch.onnx.value = dense<1> : tensor<si64>} : () -> !torch.vtensor<[],si64>
%4 = torch.operator "onnx.Gather"(%arg5, %3) {torch.onnx.axis = 0 : si64} : (!torch.vtensor<[2],si64>, !torch.vtensor<[],si6
hal.executable public @main_graph$async_dispatch_3 {
hal.executable.variant public @embedded_elf_x86_64 target(<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,+evex512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-sha512,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,+gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,-nf,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512
This file has been truncated.
// -----// IR Dump After AutoInputConversionPipelinePass (iree-auto-input-conversion) //----- //
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
module {
func.func @test_dispatch(%arg0: tensor<1x2x128xf32>, %arg1: tensor<1x2x48x30x30xf32>, %arg2: tensor<2x128x48x5x5xf32>) -> tensor<1x2x128x26x26xf32> {
%cst = arith.constant 0.000000e+00 : f32
%c40896 = arith.constant 40896 : index
%c3720640 = arith.constant 3720640 : index
%c259584 = arith.constant 259584 : index
%c605184 = arith.constant 605184 : index
This file has been truncated.
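Only the prologue of this dispatch survives the preview, but the two affine maps are telling: #map reads a rank-3 operand while iterating a rank-5 space, and #map1 is the rank-5 identity. That is the usual indexing pattern for broadcasting the 1x2x128xf32 operand across the 1x2x128x26x26xf32 result (e.g. a bias applied around the 5-D convolution). A linalg.generic using these maps would look roughly like the sketch below; it is a guess at the shape of the op, not something recovered from the truncated dump:

// Hypothetical broadcast using #map / #map1 (illustrative only):
%empty = tensor.empty() : tensor<1x2x128x26x26xf32>
%bcast = linalg.generic {indexing_maps = [#map, #map1],
    iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]}
    ins(%arg0 : tensor<1x2x128xf32>) outs(%empty : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
  linalg.yield %in : f32
} -> tensor<1x2x128x26x26xf32>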
module {
func.func @test_tfidfvectorizer_tf_uniandbigrams_skip5(%arg0: !torch.vtensor<[12],si32>) -> !torch.vtensor<[7],f32> attributes {torch.onnx_meta.ir_version = 4 : si64, torch.onnx_meta.opset_version = 17 : si64, torch.onnx_meta.producer_name = "backend-test", torch.onnx_meta.producer_version = ""} {
%none = torch.constant.none
%0 = torch.operator "onnx.TfIdfVectorizer"(%arg0) {torch.onnx.max_gram_length = 2 : si64, torch.onnx.max_skip_count = 5 : si64, torch.onnx.min_gram_length = 1 : si64, torch.onnx.mode = "TF", torch.onnx.ngram_counts = [0 : si64, 4 : si64], torch.onnx.ngram_indexes = [0 : si64, 1 : si64, 2 : si64, 3 : si64, 4 : si64, 5 : si64, 6 : si64], torch.onnx.pool_int64s = [2 : si64, 3 : si64, 5 : si64, 4 : si64, 5 : si64, 6 : si64, 7 : si64, 8 : si64, 6 : si64, 7 : si64]} : (!torch.vtensor<[12],si32>) -> !torch.vtensor<[7],f32>
return %0 : !torch.vtensor<[7],f32>
}
}
This file has been truncated.
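This gist is a small, self-contained reproducer for onnx.TfIdfVectorizer. Reading its attributes against the ONNX operator definition: pool_int64s is the n-gram pool, and ngram_counts = [0, 4] splits it into four unigrams 2, 3, 5, 4 (pool positions 0-3) and three bigrams (5,6), (7,8), (6,7) (pool positions 4-9); ngram_indexes assigns those seven n-grams to the seven slots of the [7] output, mode = "TF" emits raw term-frequency counts, and max_skip_count = 5 allows up to five skipped tokens between the two elements of a bigram.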
Args: iree-opt --pass-pipeline=builtin.module(func.func(iree-codegen-tile-and-distribute-to-workgroups-using-forall-op, cse)) --mlir-print-local-scope --split-input-file before_scf.mlir --debug
ImplicitTypeIDRegistry::lookupOrInsert(mlir::chlo::ChloDialect)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::stablehlo::StablehloDialect)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::vhlo::VhloDialect)
Load new dialect in Context builtin
ImplicitTypeIDRegistry::lookupOrInsert(mlir::ShapedType)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::MemRefLayoutAttrInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::TypedAttr)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::ElementsAttr)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::DistinctAttr)