Skip to content

Instantly share code, notes, and snippets.

Show Gist options
  • Save archana-ramalingam/0e031403e793cf8f0ee8a8d118a184be to your computer and use it in GitHub Desktop.
Save archana-ramalingam/0e031403e793cf8f0ee8a8d118a184be to your computer and use it in GitHub Desktop.
IR dump for reducelogsumexp
//===-------------------------------------------===//
Legalizing operation : 'func.func'(0x55c90793ecc0) {
* Fold {
} -> FAILURE : unable to fold
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.operator'(0x55c907983b50) {
%0 = "torch.operator"(%arg0, %arg1) <{name = "onnx.ReduceLogSumExp"}> {torch.onnx.keepdims = 1 : si64} : (!torch.vtensor<[3,2,2],f32>, !torch.vtensor<[0],si64>) -> !torch.vtensor<[1,1,1],f32>
* Fold {
} -> FAILURE : unable to fold
* Pattern : 'torch.operator -> ()' {
Trying to match ""
** Insert : 'torch.constant.int'(0x55c9079713c0)
** Insert : 'torch.constant.none'(0x55c907971420)
** Insert : 'torch.constant.bool'(0x55c907971480)
** Insert : 'torch.aten.to.dtype'(0x55c907976e80)
** Insert : 'torch.aten.exp'(0x55c9079825d0)
** Replace : 'torch.operator'(0x55c907983b50)
** Insert : 'torch.aten.log'(0x55c9079715a0)
** Insert : 'torch.constant.int'(0x55c907929ce0)
** Insert : 'torch.aten.to.dtype'(0x55c90790bd30)
** Replace : 'torch.operator'(0x55c907983b50)
"" result 1
//===-------------------------------------------===//
Legalizing operation : 'torch.constant.int'(0x55c9079713c0) {
%0 = "torch.constant.int"() <{value = 7 : i64}> : () -> !torch.int
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.constant.none'(0x55c907971420) {
%1 = "torch.constant.none"() : () -> !torch.none
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.constant.bool'(0x55c907971480) {
%2 = "torch.constant.bool"() <{value = false}> : () -> !torch.bool
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.aten.to.dtype'(0x55c907976e80) {
%3 = "torch.aten.to.dtype"(%arg0, %0, %2, %2, %1) : (!torch.vtensor<[3,2,2],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none) -> !torch.vtensor<[3,2,2],f64>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.aten.exp'(0x55c9079825d0) {
%4 = "torch.aten.exp"(%3) : (!torch.vtensor<[3,2,2],f64>) -> !torch.vtensor<[3,2,2],f64>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.aten.log'(0x55c9079715a0) {
%5 = "torch.aten.log"(%arg0) : (!torch.vtensor<[3,2,2],f32>) -> !torch.vtensor<[3,2,2],f64>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.constant.int'(0x55c907929ce0) {
%6 = "torch.constant.int"() <{value = 6 : i64}> : () -> !torch.int
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.aten.to.dtype'(0x55c90790bd30) {
%7 = "torch.aten.to.dtype"(%5, %6, %2, %2, %1) : (!torch.vtensor<[3,2,2],f64>, !torch.int, !torch.bool, !torch.bool, !torch.none) -> !torch.vtensor<[1,1,1],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
} -> SUCCESS : pattern applied successfully
// *** IR Dump After Pattern Application ***
func.func @test_reduce_log_sum_exp_default_axes_keepdims_example(%arg0: !torch.vtensor<[3,2,2],f32>, %arg1: !torch.vtensor<[0],si64>) -> !torch.vtensor<[1,1,1],f32> attributes {torch.onnx_meta.ir_version = 8 : si64, torch.onnx_meta.opset_version = 18 : si64, torch.onnx_meta.producer_name = "backend-test", torch.onnx_meta.producer_version = ""} {
%int7 = torch.constant.int 7
%none = torch.constant.none
%false = torch.constant.bool false
%0 = torch.aten.to.dtype %arg0, %int7, %false, %false, %none : !torch.vtensor<[3,2,2],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[3,2,2],f64>
%1 = torch.aten.exp %0 : !torch.vtensor<[3,2,2],f64> -> !torch.vtensor<[3,2,2],f64>
%2 = torch.aten.log %arg0 : !torch.vtensor<[3,2,2],f32> -> !torch.vtensor<[3,2,2],f64>
%int6 = torch.constant.int 6
%3 = torch.aten.to.dtype %2, %int6, %false, %false, %none : !torch.vtensor<[3,2,2],f64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,1,1],f32>
%4 = torch.operator "onnx.ReduceLogSumExp"(%arg0, %arg1) {torch.onnx.keepdims = 1 : si64} : (!torch.vtensor<[3,2,2],f32>, !torch.vtensor<[0],si64>) -> !torch.vtensor<[1,1,1],f32>
return %4 : !torch.vtensor<[1,1,1],f32>
}
} -> SUCCESS
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'func.return'(0x55c90795a190) {
"func.return"(%8) : (!torch.vtensor<[1,1,1],f32>) -> ()
* Fold {
} -> FAILURE : unable to fold
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
torch-mlir-opt: /home/aramalin/Documents/Nod/torch-mlir/externals/llvm-project/mlir/lib/IR/Operation.cpp:514: void llvm::ilist_traits<mlir::Operation>::removeNodeFromList(Operation *): Assertion `op->block && "not already in an operation block!"' failed.
PLEASE submit a bug report to https://github.com/llvm/llvm-project/issues/ and include the crash backtrace.
Stack dump:
0. Program arguments: build/bin/torch-mlir-opt --split-input-file -convert-torch-onnx-to-torch test/Conversion/TorchOnnxToTorch/simple_ops_q_to_z.mlir --debug --mlir-print-ir-after-all
Stack dump without symbol names (ensure you have llvm-symbolizer in your PATH or set the environment var `LLVM_SYMBOLIZER_PATH` to point to it):
0 torch-mlir-opt 0x000055c903b03c1d
1 torch-mlir-opt 0x000055c903b0410b
2 torch-mlir-opt 0x000055c903b02176
3 torch-mlir-opt 0x000055c903b048c5
4 libc.so.6 0x00007f64ab842520
5 libc.so.6 0x00007f64ab8969fc pthread_kill + 300
6 libc.so.6 0x00007f64ab842476 raise + 22
7 libc.so.6 0x00007f64ab8287f3 abort + 211
8 libc.so.6 0x00007f64ab82871b
9 libc.so.6 0x00007f64ab839e96
10 torch-mlir-opt 0x000055c90397a242
11 torch-mlir-opt 0x000055c9023f6831
12 torch-mlir-opt 0x000055c9023f67e8
13 torch-mlir-opt 0x000055c9023f6252
14 torch-mlir-opt 0x000055c9023d333b
15 torch-mlir-opt 0x000055c9023b5dd7
16 torch-mlir-opt 0x000055c9023bb593
17 torch-mlir-opt 0x000055c9023c00b9
18 torch-mlir-opt 0x000055c9023c01bd
19 torch-mlir-opt 0x000055c900c4eff2
20 torch-mlir-opt 0x000055c90252762b
21 torch-mlir-opt 0x000055c9025275c5
22 torch-mlir-opt 0x000055c8ffad7d29
23 torch-mlir-opt 0x000055c90252a365
24 torch-mlir-opt 0x000055c902522d83
25 torch-mlir-opt 0x000055c902523304
26 torch-mlir-opt 0x000055c902528875
27 torch-mlir-opt 0x000055c9025284f9
28 torch-mlir-opt 0x000055c9025245bb
29 torch-mlir-opt 0x000055c902523eca
30 torch-mlir-opt 0x000055c9025239f7
31 torch-mlir-opt 0x000055c902527616
32 torch-mlir-opt 0x000055c9025275c5
33 torch-mlir-opt 0x000055c8ffad7d29
34 torch-mlir-opt 0x000055c90252a365
35 torch-mlir-opt 0x000055c902522d83
36 torch-mlir-opt 0x000055c902523304
37 torch-mlir-opt 0x000055c902524d48
38 torch-mlir-opt 0x000055c902524c72
39 torch-mlir-opt 0x000055c8ffa79582
40 torch-mlir-opt 0x000055c8ffa791b8
41 torch-mlir-opt 0x000055c8ffa78f9c
42 torch-mlir-opt 0x000055c8ffa78f36
43 torch-mlir-opt 0x000055c9039ec642
44 torch-mlir-opt 0x000055c9039ec568
45 torch-mlir-opt 0x000055c9039ec39e
46 torch-mlir-opt 0x000055c9039ec2ea
47 torch-mlir-opt 0x000055c9039ec203
48 torch-mlir-opt 0x000055c8ffa75865
49 torch-mlir-opt 0x000055c8ffa75bc9
50 torch-mlir-opt 0x000055c8ffa75d98
51 torch-mlir-opt 0x000055c8ffa71225
52 libc.so.6 0x00007f64ab829d90
53 libc.so.6 0x00007f64ab829e40 __libc_start_main + 128
54 torch-mlir-opt 0x000055c8ffa710b5
My MLIR test:
// ReduceLogSumExp is lowered via double precision for numerical stability:
// upcast input to f64 (torch dtype 7), exp, sum with keepdims, log, then
// downcast to the result dtype f32 (torch dtype 6). The dtype constants are
// established by the pattern's own trace: `to.dtype` with int 7 produces f64
// and `to.dtype` with int 6 produces f32, so the intermediates below must be
// f64 and the final cast must use a captured INT6, not INT7.
// CHECK-LABEL: func.func @test_reduce_log_sum_exp_default_axes_keepdims_example
func.func @test_reduce_log_sum_exp_default_axes_keepdims_example(%arg0: !torch.vtensor<[3,2,2],f32>, %arg1: !torch.vtensor<[0],si64>) -> !torch.vtensor<[1,1,1],f32> attributes {torch.onnx_meta.ir_version = 8 : si64, torch.onnx_meta.opset_version = 18 : si64, torch.onnx_meta.producer_name = "backend-test", torch.onnx_meta.producer_version = ""} {
// CHECK: %[[INT0:.+]] = torch.constant.int 0
// CHECK: %[[DIMS:.+]] = torch.prim.ListConstruct : () -> !torch.list<int>
// CHECK: %[[INT7:.+]] = torch.constant.int 7
// CHECK: %[[NONE:.+]] = torch.constant.none
// CHECK: %[[FALSE:.+]] = torch.constant.bool false
// CHECK: %[[CAST:.+]] = torch.aten.to.dtype %arg0, %[[INT7]], %[[FALSE]], %[[FALSE]], %[[NONE]] : !torch.vtensor<[3,2,2],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[3,2,2],f64>
// CHECK: %[[EXP:.+]] = torch.aten.exp %[[CAST]] : !torch.vtensor<[3,2,2],f64> -> !torch.vtensor<[3,2,2],f64>
// CHECK: %[[TRUE:.+]] = torch.constant.bool true
// CHECK: %[[SUM:.+]] = torch.aten.sum.dim_IntList %[[EXP]], %[[DIMS]], %[[TRUE]], %[[NONE]] : !torch.vtensor<[3,2,2],f64>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,1,1],f64>
// CHECK: %[[LOG:.+]] = torch.aten.log %[[SUM]] : !torch.vtensor<[1,1,1],f64> -> !torch.vtensor<[1,1,1],f64>
// CHECK: %[[INT6:.+]] = torch.constant.int 6
// CHECK: %[[CASTLIKE:.+]] = torch.aten.to.dtype %[[LOG]], %[[INT6]], %[[FALSE]], %[[FALSE]], %[[NONE]] : !torch.vtensor<[1,1,1],f64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,1,1],f32>
// CHECK: return %[[CASTLIKE]] : !torch.vtensor<[1,1,1],f32>
%0 = torch.operator "onnx.ReduceLogSumExp"(%arg0, %arg1) {torch.onnx.keepdims = 1 : si64} : (!torch.vtensor<[3,2,2],f32>, !torch.vtensor<[0],si64>) -> !torch.vtensor<[1,1,1],f32>
return %0 : !torch.vtensor<[1,1,1],f32>
}
// -----
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment