@pashu123
Created December 18, 2024 15:40
This file has been truncated.
// -----// IR Dump After ConvertTorchOnnxToTorch (convert-torch-onnx-to-torch) //----- //
func.func @torch_jit(%arg0: !torch.vtensor<[1,128,4,256],f32>) -> !torch.vtensor<[1,257,4,256],f32> attributes {torch.onnx_meta.ir_version = 7 : si64, torch.onnx_meta.opset_version = 21 : si64, torch.onnx_meta.producer_name = "pytorch", torch.onnx_meta.producer_version = "1.12.1"} {
%0 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>) : !torch.vtensor<[257,128,1,1],f32>
%1 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>) : !torch.vtensor<[257],f32>
%int0 = torch.constant.int 0
%int0_0 = torch.constant.int 0
%2 = torch.prim.ListConstruct %int0, %int0_0 : (!torch.int, !torch.int) -> !torch.list<int>
%int1 = torch.constant.int 1
%int1_1 = torch.constant.int 1
%int1_2 = torch.constant.int 1
%int1_3 = torch.constant.int 1
%int0_4 = torch.constant.int 0
%3 = torch.prim.ListConstruct %int1, %int1_1 : (!torch.int, !torch.int) -> !torch.list<int>
%4 = torch.prim.ListConstruct %int1_2, %int1_3 : (!torch.int, !torch.int) -> !torch.list<int>
%5 = torch.prim.ListConstruct %int0_4, %int0_4 : (!torch.int, !torch.int) -> !torch.list<int>
%false = torch.constant.bool false
%int1_5 = torch.constant.int 1
%6 = torch.aten.convolution %arg0, %0, %1, %4, %2, %3, %false, %5, %int1_5 : !torch.vtensor<[1,128,4,256],f32>, !torch.vtensor<[257,128,1,1],f32>, !torch.vtensor<[257],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,257,4,256],f32>
return %6 : !torch.vtensor<[1,257,4,256],f32>
}
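For orientation, the whole graph is a single 1x1 convolution taking 128 channels to 257 channels. A minimal PyTorch sketch that produces the same shapes (purely illustrative; the actual weights were elided from this gist as dense_resource placeholders):

import torch

# Hypothetical stand-in for the exported model: a 1x1 convolution from
# 128 to 257 channels, stride 1, no padding, matching the
# torch.aten.convolution operands in the dump above.
conv = torch.nn.Conv2d(in_channels=128, out_channels=257, kernel_size=1, bias=True)

x = torch.randn(1, 128, 4, 256)   # !torch.vtensor<[1,128,4,256],f32>
y = conv(x)
print(y.shape)                    # torch.Size([1, 257, 4, 256])
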
// -----// IR Dump After DecomposeComplexOps (torch-decompose-complex-ops) //----- //
func.func @torch_jit(%arg0: !torch.vtensor<[1,128,4,256],f32>) -> !torch.vtensor<[1,257,4,256],f32> attributes {torch.onnx_meta.ir_version = 7 : si64, torch.onnx_meta.opset_version = 21 : si64, torch.onnx_meta.producer_name = "pytorch", torch.onnx_meta.producer_version = "1.12.1"} {
%false = torch.constant.bool false
%int1 = torch.constant.int 1
%0 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>) : !torch.vtensor<[257,128,1,1],f32>
%1 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>) : !torch.vtensor<[257],f32>
%int0 = torch.constant.int 0
%2 = torch.prim.ListConstruct %int0, %int0 : (!torch.int, !torch.int) -> !torch.list<int>
%3 = torch.prim.ListConstruct %int1, %int1 : (!torch.int, !torch.int) -> !torch.list<int>
%4 = torch.prim.ListConstruct %int1, %int1 : (!torch.int, !torch.int) -> !torch.list<int>
%5 = torch.prim.ListConstruct %int0, %int0 : (!torch.int, !torch.int) -> !torch.list<int>
%6 = torch.aten.convolution %arg0, %0, %1, %4, %2, %3, %false, %5, %int1 : !torch.vtensor<[1,128,4,256],f32>, !torch.vtensor<[257,128,1,1],f32>, !torch.vtensor<[257],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,257,4,256],f32>
return %6 : !torch.vtensor<[1,257,4,256],f32>
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
func.func @torch_jit(%arg0: !torch.vtensor<[1,128,4,256],f32>) -> !torch.vtensor<[1,257,4,256],f32> attributes {torch.onnx_meta.ir_version = 7 : si64, torch.onnx_meta.opset_version = 21 : si64, torch.onnx_meta.producer_name = "pytorch", torch.onnx_meta.producer_version = "1.12.1"} {
%false = torch.constant.bool false
%int1 = torch.constant.int 1
%0 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>) : !torch.vtensor<[257,128,1,1],f32>
%1 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>) : !torch.vtensor<[257],f32>
%int0 = torch.constant.int 0
%2 = torch.prim.ListConstruct %int0, %int0 : (!torch.int, !torch.int) -> !torch.list<int>
%3 = torch.prim.ListConstruct %int1, %int1 : (!torch.int, !torch.int) -> !torch.list<int>
%4 = torch.prim.ListConstruct %int1, %int1 : (!torch.int, !torch.int) -> !torch.list<int>
%5 = torch.prim.ListConstruct %int0, %int0 : (!torch.int, !torch.int) -> !torch.list<int>
%6 = torch.aten.convolution %arg0, %0, %1, %4, %2, %3, %false, %5, %int1 : !torch.vtensor<[1,128,4,256],f32>, !torch.vtensor<[257,128,1,1],f32>, !torch.vtensor<[257],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,257,4,256],f32>
return %6 : !torch.vtensor<[1,257,4,256],f32>
}
// -----// IR Dump After ReifyShapeCalculations (torch-reify-shape-calculations) //----- //
module {
func.func private @__torch__.torch.jit._shape_functions.conv_forwards(%arg0: !torch.list<int>, %arg1: !torch.list<int>, %arg2: !torch.optional<list<int>>, %arg3: !torch.list<int>, %arg4: !torch.list<int>, %arg5: !torch.list<int>, %arg6: !torch.bool, %arg7: !torch.list<int>, %arg8: !torch.int) -> !torch.list<int> {
%true = torch.constant.bool true
%int0 = torch.constant.int 0
%int1 = torch.constant.int 1
%int2 = torch.constant.int 2
%0 = torch.aten.len.t %arg5 : !torch.list<int> -> !torch.int
%1 = torch.aten.gt.int %0, %int0 : !torch.int, !torch.int -> !torch.bool
%2 = torch.aten.len.t %arg7 : !torch.list<int> -> !torch.int
%3 = torch.aten.gt.int %2, %int0 : !torch.int, !torch.int -> !torch.bool
%4 = torch.aten.len.t %arg0 : !torch.list<int> -> !torch.int
%5 = torch.prim.ListConstruct : () -> !torch.list<int>
%6 = torch.prim.If %arg6 -> (!torch.int) {
torch.prim.If.yield %int1 : !torch.int
} else {
torch.prim.If.yield %int0 : !torch.int
}
%7 = torch.aten.__getitem__.t %arg0, %int0 : !torch.list<int>, !torch.int -> !torch.int
%8 = torch.aten.append.t %5, %7 : !torch.list<int>, !torch.int -> !torch.list<int>
torch.prim.If %arg6 -> () {
%10 = torch.aten.__getitem__.t %arg1, %6 : !torch.list<int>, !torch.int -> !torch.int
%11 = torch.aten.mul.int %10, %arg8 : !torch.int, !torch.int -> !torch.int
%12 = torch.aten.append.t %5, %11 : !torch.list<int>, !torch.int -> !torch.list<int>
torch.prim.If.yield
} else {
%10 = torch.aten.__getitem__.t %arg1, %6 : !torch.list<int>, !torch.int -> !torch.int
%11 = torch.aten.append.t %5, %10 : !torch.list<int>, !torch.int -> !torch.list<int>
torch.prim.If.yield
}
%9 = torch.aten.__range_length %int2, %4, %int1 : !torch.int, !torch.int, !torch.int -> !torch.int
torch.prim.Loop %9, %true, init() {
^bb0(%arg9: !torch.int):
%10 = torch.aten.__derive_index %arg9, %int2, %int1 : !torch.int, !torch.int, !torch.int -> !torch.int
%11 = torch.prim.If %1 -> (!torch.int) {
%13 = torch.aten.sub.int %10, %int2 : !torch.int, !torch.int -> !torch.int
%14 = torch.aten.__getitem__.t %arg5, %13 : !torch.list<int>, !torch.int -> !torch.int
torch.prim.If.yield %14 : !torch.int
} else {
torch.prim.If.yield %int1 : !torch.int
}
%12 = torch.prim.If %3 -> (!torch.int) {
%13 = torch.aten.sub.int %10, %int2 : !torch.int, !torch.int -> !torch.int
%14 = torch.aten.__getitem__.t %arg7, %13 : !torch.list<int>, !torch.int -> !torch.int
torch.prim.If.yield %14 : !torch.int
} else {
torch.prim.If.yield %int0 : !torch.int
}
torch.prim.If %arg6 -> () {
%13 = torch.aten.__getitem__.t %arg1, %10 : !torch.list<int>, !torch.int -> !torch.int
%14 = torch.aten.sub.int %13, %int1 : !torch.int, !torch.int -> !torch.int
%15 = torch.aten.mul.int %11, %14 : !torch.int, !torch.int -> !torch.int
%16 = torch.aten.__getitem__.t %arg0, %10 : !torch.list<int>, !torch.int -> !torch.int
%17 = torch.aten.sub.int %16, %int1 : !torch.int, !torch.int -> !torch.int
%18 = torch.aten.sub.int %10, %int2 : !torch.int, !torch.int -> !torch.int
%19 = torch.aten.__getitem__.t %arg3, %18 : !torch.list<int>, !torch.int -> !torch.int
%20 = torch.aten.mul.int %17, %19 : !torch.int, !torch.int -> !torch.int
%21 = torch.aten.sub.int %10, %int2 : !torch.int, !torch.int -> !torch.int
%22 = torch.aten.__getitem__.t %arg4, %21 : !torch.list<int>, !torch.int -> !torch.int
%23 = torch.aten.mul.int %22, %int2 : !torch.int, !torch.int -> !torch.int
%24 = torch.aten.sub.int %20, %23 : !torch.int, !torch.int -> !torch.int
%25 = torch.aten.add.int %24, %15 : !torch.int, !torch.int -> !torch.int
%26 = torch.aten.add.int %25, %12 : !torch.int, !torch.int -> !torch.int
%27 = torch.aten.add.int %26, %int1 : !torch.int, !torch.int -> !torch.int
%28 = torch.aten.append.t %5, %27 : !torch.list<int>, !torch.int -> !torch.list<int>
torch.prim.If.yield
} else {
%13 = torch.aten.__getitem__.t %arg1, %10 : !torch.list<int>, !torch.int -> !torch.int
%14 = torch.aten.sub.int %13, %int1 : !torch.int, !torch.int -> !torch.int
%15 = torch.aten.mul.int %11, %14 : !torch.int, !torch.int -> !torch.int
%16 = torch.aten.add.int %15, %int1 : !torch.int, !torch.int -> !torch.int
%17 = torch.aten.__getitem__.t %arg0, %10 : !torch.list<int>, !torch.int -> !torch.int
%18 = torch.aten.sub.int %10, %int2 : !torch.int, !torch.int -> !torch.int
%19 = torch.aten.__getitem__.t %arg4, %18 : !torch.list<int>, !torch.int -> !torch.int
%20 = torch.aten.mul.int %19, %int2 : !torch.int, !torch.int -> !torch.int
%21 = torch.aten.add.int %17, %20 : !torch.int, !torch.int -> !torch.int
%22 = torch.aten.sub.int %21, %16 : !torch.int, !torch.int -> !torch.int
%23 = torch.aten.sub.int %10, %int2 : !torch.int, !torch.int -> !torch.int
%24 = torch.aten.__getitem__.t %arg3, %23 : !torch.list<int>, !torch.int -> !torch.int
%25 = torch.aten.floordiv.int %22, %24 : !torch.int, !torch.int -> !torch.int
%26 = torch.aten.add.int %25, %int1 : !torch.int, !torch.int -> !torch.int
%27 = torch.aten.append.t %5, %26 : !torch.list<int>, !torch.int -> !torch.list<int>
torch.prim.If.yield
}
torch.prim.Loop.condition %true, iter()
} : (!torch.int, !torch.bool) -> ()
return %5 : !torch.list<int>
}
func.func private @__torch_mlir_shape_fn.aten.convolution(%arg0: !torch.list<int>, %arg1: !torch.list<int>, %arg2: !torch.optional<list<int>>, %arg3: !torch.list<int>, %arg4: !torch.list<int>, %arg5: !torch.list<int>, %arg6: !torch.bool, %arg7: !torch.list<int>, %arg8: !torch.int) -> !torch.list<int> {
%0 = call @__torch__.torch.jit._shape_functions.conv_forwards(%arg0, %arg1, %arg2, %arg3, %arg4, %arg5, %arg6, %arg7, %arg8) : (!torch.list<int>, !torch.list<int>, !torch.optional<list<int>>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int) -> !torch.list<int>
return %0 : !torch.list<int>
}
func.func @torch_jit(%arg0: !torch.vtensor<[1,128,4,256],f32>) -> !torch.vtensor<[1,257,4,256],f32> attributes {torch.onnx_meta.ir_version = 7 : si64, torch.onnx_meta.opset_version = 21 : si64, torch.onnx_meta.producer_name = "pytorch", torch.onnx_meta.producer_version = "1.12.1"} {
%false = torch.constant.bool false
%int1 = torch.constant.int 1
%0 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>) : !torch.vtensor<[257,128,1,1],f32>
%1 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>) : !torch.vtensor<[257],f32>
%int0 = torch.constant.int 0
%2 = torch.prim.ListConstruct %int0, %int0 : (!torch.int, !torch.int) -> !torch.list<int>
%3 = torch.prim.ListConstruct %int1, %int1 : (!torch.int, !torch.int) -> !torch.list<int>
%4 = torch.prim.ListConstruct %int1, %int1 : (!torch.int, !torch.int) -> !torch.list<int>
%5 = torch.prim.ListConstruct %int0, %int0 : (!torch.int, !torch.int) -> !torch.list<int>
%6 = torch.shape.calculate {
%7 = torch.aten.convolution %arg0, %0, %1, %4, %2, %3, %false, %5, %int1 : !torch.vtensor<[1,128,4,256],f32>, !torch.vtensor<[257,128,1,1],f32>, !torch.vtensor<[257],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,257,4,256],f32>
torch.shape.calculate.yield %7 : !torch.vtensor<[1,257,4,256],f32>
} shapes {
%7 = torch.aten.size %arg0 : !torch.vtensor<[1,128,4,256],f32> -> !torch.list<int>
%8 = torch.aten.size %0 : !torch.vtensor<[257,128,1,1],f32> -> !torch.list<int>
%9 = torch.aten.size %1 : !torch.vtensor<[257],f32> -> !torch.list<int>
%10 = torch.derefine %9 : !torch.list<int> to !torch.optional<list<int>>
%11 = func.call @__torch_mlir_shape_fn.aten.convolution(%7, %8, %10, %4, %2, %3, %false, %5, %int1) : (!torch.list<int>, !torch.list<int>, !torch.optional<list<int>>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int) -> !torch.list<int>
torch.shape.calculate.yield.shapes %11 : !torch.list<int>
} : !torch.vtensor<[1,257,4,256],f32>
return %6 : !torch.vtensor<[1,257,4,256],f32>
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
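The reified @__torch__.torch.jit._shape_functions.conv_forwards above is an import of PyTorch's Python shape function for convolution. As plain Python, the per-dimension arithmetic it encodes is the standard output-size formula (a sketch with names of my choosing, not taken from the dump):

def conv_output_size(in_size, kernel, stride, pad, dilation, out_pad, transposed):
    # Mirrors the per-dimension arithmetic of conv_forwards above.
    if transposed:
        # (in - 1)*stride - 2*pad + dilation*(kernel - 1) + out_pad + 1
        return (in_size - 1) * stride - 2 * pad + dilation * (kernel - 1) + out_pad + 1
    # (in + 2*pad - (dilation*(kernel - 1) + 1)) // stride + 1
    return (in_size + 2 * pad - (dilation * (kernel - 1) + 1)) // stride + 1

# For this graph (kernel 1, stride 1, pad 0, dilation 1) the spatial dims pass through:
assert conv_output_size(4, 1, 1, 0, 1, 0, False) == 4
assert conv_output_size(256, 1, 1, 0, 1, 0, False) == 256

The batch dimension is copied from the input and the channel dimension from the weight (scaled by groups in the transposed case), which is why the shape calculation below resolves to [1, 257, 4, 256].
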
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
func.func private @__torch__.torch.jit._shape_functions.conv_forwards(%arg0: !torch.list<int>, %arg1: !torch.list<int>, %arg2: !torch.optional<list<int>>, %arg3: !torch.list<int>, %arg4: !torch.list<int>, %arg5: !torch.list<int>, %arg6: !torch.bool, %arg7: !torch.list<int>, %arg8: !torch.int) -> !torch.list<int> {
%true = torch.constant.bool true
%int0 = torch.constant.int 0
%int1 = torch.constant.int 1
%int2 = torch.constant.int 2
%0 = torch.aten.len.t %arg5 : !torch.list<int> -> !torch.int
%1 = torch.aten.gt.int %0, %int0 : !torch.int, !torch.int -> !torch.bool
%2 = torch.aten.len.t %arg7 : !torch.list<int> -> !torch.int
%3 = torch.aten.gt.int %2, %int0 : !torch.int, !torch.int -> !torch.bool
%4 = torch.aten.len.t %arg0 : !torch.list<int> -> !torch.int
%5 = torch.prim.ListConstruct : () -> !torch.list<int>
%6 = torch.prim.If %arg6 -> (!torch.int) {
torch.prim.If.yield %int1 : !torch.int
} else {
torch.prim.If.yield %int0 : !torch.int
}
%7 = torch.aten.__getitem__.t %arg0, %int0 : !torch.list<int>, !torch.int -> !torch.int
%8 = torch.aten.append.t %5, %7 : !torch.list<int>, !torch.int -> !torch.list<int>
torch.prim.If %arg6 -> () {
%10 = torch.aten.__getitem__.t %arg1, %6 : !torch.list<int>, !torch.int -> !torch.int
%11 = torch.aten.mul.int %10, %arg8 : !torch.int, !torch.int -> !torch.int
%12 = torch.aten.append.t %5, %11 : !torch.list<int>, !torch.int -> !torch.list<int>
torch.prim.If.yield
} else {
%10 = torch.aten.__getitem__.t %arg1, %6 : !torch.list<int>, !torch.int -> !torch.int
%11 = torch.aten.append.t %5, %10 : !torch.list<int>, !torch.int -> !torch.list<int>
torch.prim.If.yield
}
%9 = torch.aten.__range_length %int2, %4, %int1 : !torch.int, !torch.int, !torch.int -> !torch.int
torch.prim.Loop %9, %true, init() {
^bb0(%arg9: !torch.int):
%10 = torch.aten.__derive_index %arg9, %int2, %int1 : !torch.int, !torch.int, !torch.int -> !torch.int
%11 = torch.prim.If %1 -> (!torch.int) {
%13 = torch.aten.sub.int %10, %int2 : !torch.int, !torch.int -> !torch.int
%14 = torch.aten.__getitem__.t %arg5, %13 : !torch.list<int>, !torch.int -> !torch.int
torch.prim.If.yield %14 : !torch.int
} else {
torch.prim.If.yield %int1 : !torch.int
}
%12 = torch.prim.If %3 -> (!torch.int) {
%13 = torch.aten.sub.int %10, %int2 : !torch.int, !torch.int -> !torch.int
%14 = torch.aten.__getitem__.t %arg7, %13 : !torch.list<int>, !torch.int -> !torch.int
torch.prim.If.yield %14 : !torch.int
} else {
torch.prim.If.yield %int0 : !torch.int
}
torch.prim.If %arg6 -> () {
%13 = torch.aten.__getitem__.t %arg1, %10 : !torch.list<int>, !torch.int -> !torch.int
%14 = torch.aten.sub.int %13, %int1 : !torch.int, !torch.int -> !torch.int
%15 = torch.aten.mul.int %11, %14 : !torch.int, !torch.int -> !torch.int
%16 = torch.aten.__getitem__.t %arg0, %10 : !torch.list<int>, !torch.int -> !torch.int
%17 = torch.aten.sub.int %16, %int1 : !torch.int, !torch.int -> !torch.int
%18 = torch.aten.sub.int %10, %int2 : !torch.int, !torch.int -> !torch.int
%19 = torch.aten.__getitem__.t %arg3, %18 : !torch.list<int>, !torch.int -> !torch.int
%20 = torch.aten.mul.int %17, %19 : !torch.int, !torch.int -> !torch.int
%21 = torch.aten.sub.int %10, %int2 : !torch.int, !torch.int -> !torch.int
%22 = torch.aten.__getitem__.t %arg4, %21 : !torch.list<int>, !torch.int -> !torch.int
%23 = torch.aten.mul.int %22, %int2 : !torch.int, !torch.int -> !torch.int
%24 = torch.aten.sub.int %20, %23 : !torch.int, !torch.int -> !torch.int
%25 = torch.aten.add.int %24, %15 : !torch.int, !torch.int -> !torch.int
%26 = torch.aten.add.int %25, %12 : !torch.int, !torch.int -> !torch.int
%27 = torch.aten.add.int %26, %int1 : !torch.int, !torch.int -> !torch.int
%28 = torch.aten.append.t %5, %27 : !torch.list<int>, !torch.int -> !torch.list<int>
torch.prim.If.yield
} else {
%13 = torch.aten.__getitem__.t %arg1, %10 : !torch.list<int>, !torch.int -> !torch.int
%14 = torch.aten.sub.int %13, %int1 : !torch.int, !torch.int -> !torch.int
%15 = torch.aten.mul.int %11, %14 : !torch.int, !torch.int -> !torch.int
%16 = torch.aten.add.int %15, %int1 : !torch.int, !torch.int -> !torch.int
%17 = torch.aten.__getitem__.t %arg0, %10 : !torch.list<int>, !torch.int -> !torch.int
%18 = torch.aten.sub.int %10, %int2 : !torch.int, !torch.int -> !torch.int
%19 = torch.aten.__getitem__.t %arg4, %18 : !torch.list<int>, !torch.int -> !torch.int
%20 = torch.aten.mul.int %19, %int2 : !torch.int, !torch.int -> !torch.int
%21 = torch.aten.add.int %17, %20 : !torch.int, !torch.int -> !torch.int
%22 = torch.aten.sub.int %21, %16 : !torch.int, !torch.int -> !torch.int
%23 = torch.aten.sub.int %10, %int2 : !torch.int, !torch.int -> !torch.int
%24 = torch.aten.__getitem__.t %arg3, %23 : !torch.list<int>, !torch.int -> !torch.int
%25 = torch.aten.floordiv.int %22, %24 : !torch.int, !torch.int -> !torch.int
%26 = torch.aten.add.int %25, %int1 : !torch.int, !torch.int -> !torch.int
%27 = torch.aten.append.t %5, %26 : !torch.list<int>, !torch.int -> !torch.list<int>
torch.prim.If.yield
}
torch.prim.Loop.condition %true, iter()
} : (!torch.int, !torch.bool) -> ()
return %5 : !torch.list<int>
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
func.func private @__torch_mlir_shape_fn.aten.convolution(%arg0: !torch.list<int>, %arg1: !torch.list<int>, %arg2: !torch.optional<list<int>>, %arg3: !torch.list<int>, %arg4: !torch.list<int>, %arg5: !torch.list<int>, %arg6: !torch.bool, %arg7: !torch.list<int>, %arg8: !torch.int) -> !torch.list<int> {
%0 = call @__torch__.torch.jit._shape_functions.conv_forwards(%arg0, %arg1, %arg2, %arg3, %arg4, %arg5, %arg6, %arg7, %arg8) : (!torch.list<int>, !torch.list<int>, !torch.optional<list<int>>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int) -> !torch.list<int>
return %0 : !torch.list<int>
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
func.func private @__torch_mlir_shape_fn.aten.convolution(%arg0: !torch.list<int>, %arg1: !torch.list<int>, %arg2: !torch.optional<list<int>>, %arg3: !torch.list<int>, %arg4: !torch.list<int>, %arg5: !torch.list<int>, %arg6: !torch.bool, %arg7: !torch.list<int>, %arg8: !torch.int) -> !torch.list<int> {
%true = torch.constant.bool true
%int0 = torch.constant.int 0
%int1 = torch.constant.int 1
%int2 = torch.constant.int 2
%0 = torch.aten.len.t %arg5 : !torch.list<int> -> !torch.int
%1 = torch.aten.gt.int %0, %int0 : !torch.int, !torch.int -> !torch.bool
%2 = torch.aten.len.t %arg7 : !torch.list<int> -> !torch.int
%3 = torch.aten.gt.int %2, %int0 : !torch.int, !torch.int -> !torch.bool
%4 = torch.aten.len.t %arg0 : !torch.list<int> -> !torch.int
%5 = torch.prim.ListConstruct : () -> !torch.list<int>
%6 = torch.prim.If %arg6 -> (!torch.int) {
torch.prim.If.yield %int1 : !torch.int
} else {
torch.prim.If.yield %int0 : !torch.int
}
%7 = torch.aten.__getitem__.t %arg0, %int0 : !torch.list<int>, !torch.int -> !torch.int
%8 = torch.aten.append.t %5, %7 : !torch.list<int>, !torch.int -> !torch.list<int>
torch.prim.If %arg6 -> () {
%10 = torch.aten.__getitem__.t %arg1, %6 : !torch.list<int>, !torch.int -> !torch.int
%11 = torch.aten.mul.int %10, %arg8 : !torch.int, !torch.int -> !torch.int
%12 = torch.aten.append.t %5, %11 : !torch.list<int>, !torch.int -> !torch.list<int>
torch.prim.If.yield
} else {
%10 = torch.aten.__getitem__.t %arg1, %6 : !torch.list<int>, !torch.int -> !torch.int
%11 = torch.aten.append.t %5, %10 : !torch.list<int>, !torch.int -> !torch.list<int>
torch.prim.If.yield
}
%9 = torch.aten.__range_length %int2, %4, %int1 : !torch.int, !torch.int, !torch.int -> !torch.int
torch.prim.Loop %9, %true, init() {
^bb0(%arg9: !torch.int):
%10 = torch.aten.__derive_index %arg9, %int2, %int1 : !torch.int, !torch.int, !torch.int -> !torch.int
%11 = torch.prim.If %1 -> (!torch.int) {
%13 = torch.aten.sub.int %10, %int2 : !torch.int, !torch.int -> !torch.int
%14 = torch.aten.__getitem__.t %arg5, %13 : !torch.list<int>, !torch.int -> !torch.int
torch.prim.If.yield %14 : !torch.int
} else {
torch.prim.If.yield %int1 : !torch.int
}
%12 = torch.prim.If %3 -> (!torch.int) {
%13 = torch.aten.sub.int %10, %int2 : !torch.int, !torch.int -> !torch.int
%14 = torch.aten.__getitem__.t %arg7, %13 : !torch.list<int>, !torch.int -> !torch.int
torch.prim.If.yield %14 : !torch.int
} else {
torch.prim.If.yield %int0 : !torch.int
}
torch.prim.If %arg6 -> () {
%13 = torch.aten.__getitem__.t %arg1, %10 : !torch.list<int>, !torch.int -> !torch.int
%14 = torch.aten.sub.int %13, %int1 : !torch.int, !torch.int -> !torch.int
%15 = torch.aten.mul.int %11, %14 : !torch.int, !torch.int -> !torch.int
%16 = torch.aten.__getitem__.t %arg0, %10 : !torch.list<int>, !torch.int -> !torch.int
%17 = torch.aten.sub.int %16, %int1 : !torch.int, !torch.int -> !torch.int
%18 = torch.aten.sub.int %10, %int2 : !torch.int, !torch.int -> !torch.int
%19 = torch.aten.__getitem__.t %arg3, %18 : !torch.list<int>, !torch.int -> !torch.int
%20 = torch.aten.mul.int %17, %19 : !torch.int, !torch.int -> !torch.int
%21 = torch.aten.sub.int %10, %int2 : !torch.int, !torch.int -> !torch.int
%22 = torch.aten.__getitem__.t %arg4, %21 : !torch.list<int>, !torch.int -> !torch.int
%23 = torch.aten.mul.int %22, %int2 : !torch.int, !torch.int -> !torch.int
%24 = torch.aten.sub.int %20, %23 : !torch.int, !torch.int -> !torch.int
%25 = torch.aten.add.int %24, %15 : !torch.int, !torch.int -> !torch.int
%26 = torch.aten.add.int %25, %12 : !torch.int, !torch.int -> !torch.int
%27 = torch.aten.add.int %26, %int1 : !torch.int, !torch.int -> !torch.int
%28 = torch.aten.append.t %5, %27 : !torch.list<int>, !torch.int -> !torch.list<int>
torch.prim.If.yield
} else {
%13 = torch.aten.__getitem__.t %arg1, %10 : !torch.list<int>, !torch.int -> !torch.int
%14 = torch.aten.sub.int %13, %int1 : !torch.int, !torch.int -> !torch.int
%15 = torch.aten.mul.int %11, %14 : !torch.int, !torch.int -> !torch.int
%16 = torch.aten.add.int %15, %int1 : !torch.int, !torch.int -> !torch.int
%17 = torch.aten.__getitem__.t %arg0, %10 : !torch.list<int>, !torch.int -> !torch.int
%18 = torch.aten.sub.int %10, %int2 : !torch.int, !torch.int -> !torch.int
%19 = torch.aten.__getitem__.t %arg4, %18 : !torch.list<int>, !torch.int -> !torch.int
%20 = torch.aten.mul.int %19, %int2 : !torch.int, !torch.int -> !torch.int
%21 = torch.aten.add.int %17, %20 : !torch.int, !torch.int -> !torch.int
%22 = torch.aten.sub.int %21, %16 : !torch.int, !torch.int -> !torch.int
%23 = torch.aten.sub.int %10, %int2 : !torch.int, !torch.int -> !torch.int
%24 = torch.aten.__getitem__.t %arg3, %23 : !torch.list<int>, !torch.int -> !torch.int
%25 = torch.aten.floordiv.int %22, %24 : !torch.int, !torch.int -> !torch.int
%26 = torch.aten.add.int %25, %int1 : !torch.int, !torch.int -> !torch.int
%27 = torch.aten.append.t %5, %26 : !torch.list<int>, !torch.int -> !torch.list<int>
torch.prim.If.yield
}
torch.prim.Loop.condition %true, iter()
} : (!torch.int, !torch.bool) -> ()
return %5 : !torch.list<int>
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
func.func @torch_jit(%arg0: !torch.vtensor<[1,128,4,256],f32>) -> !torch.vtensor<[1,257,4,256],f32> attributes {torch.onnx_meta.ir_version = 7 : si64, torch.onnx_meta.opset_version = 21 : si64, torch.onnx_meta.producer_name = "pytorch", torch.onnx_meta.producer_version = "1.12.1"} {
%int257 = torch.constant.int 257
%int256 = torch.constant.int 256
%int4 = torch.constant.int 4
%int128 = torch.constant.int 128
%false = torch.constant.bool false
%int1 = torch.constant.int 1
%0 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>) : !torch.vtensor<[257,128,1,1],f32>
%1 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>) : !torch.vtensor<[257],f32>
%int0 = torch.constant.int 0
%2 = torch.prim.ListConstruct %int0, %int0 : (!torch.int, !torch.int) -> !torch.list<int>
%3 = torch.prim.ListConstruct %int1, %int1 : (!torch.int, !torch.int) -> !torch.list<int>
%4 = torch.prim.ListConstruct %int1, %int1 : (!torch.int, !torch.int) -> !torch.list<int>
%5 = torch.prim.ListConstruct %int0, %int0 : (!torch.int, !torch.int) -> !torch.list<int>
%6 = torch.shape.calculate {
%7 = torch.aten.convolution %arg0, %0, %1, %4, %2, %3, %false, %5, %int1 : !torch.vtensor<[1,128,4,256],f32>, !torch.vtensor<[257,128,1,1],f32>, !torch.vtensor<[257],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,257,4,256],f32>
torch.shape.calculate.yield %7 : !torch.vtensor<[1,257,4,256],f32>
} shapes {
%7 = torch.prim.ListConstruct %int1, %int128, %int4, %int256 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%8 = torch.prim.ListConstruct %int257, %int128, %int1, %int1 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%9 = torch.prim.ListConstruct %int257 : (!torch.int) -> !torch.list<int>
%10 = torch.derefine %9 : !torch.list<int> to !torch.optional<list<int>>
%11 = func.call @__torch_mlir_shape_fn.aten.convolution(%7, %8, %10, %4, %2, %3, %false, %5, %int1) : (!torch.list<int>, !torch.list<int>, !torch.optional<list<int>>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int) -> !torch.list<int>
torch.shape.calculate.yield.shapes %11 : !torch.list<int>
} : !torch.vtensor<[1,257,4,256],f32>
return %6 : !torch.vtensor<[1,257,4,256],f32>
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
func.func @torch_jit(%arg0: !torch.vtensor<[1,128,4,256],f32>) -> !torch.vtensor<[1,257,4,256],f32> attributes {torch.onnx_meta.ir_version = 7 : si64, torch.onnx_meta.opset_version = 21 : si64, torch.onnx_meta.producer_name = "pytorch", torch.onnx_meta.producer_version = "1.12.1"} {
%int2 = torch.constant.int 2
%true = torch.constant.bool true
%int257 = torch.constant.int 257
%int256 = torch.constant.int 256
%int4 = torch.constant.int 4
%int128 = torch.constant.int 128
%false = torch.constant.bool false
%int1 = torch.constant.int 1
%0 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>) : !torch.vtensor<[257,128,1,1],f32>
%1 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>) : !torch.vtensor<[257],f32>
%int0 = torch.constant.int 0
%2 = torch.prim.ListConstruct %int0, %int0 : (!torch.int, !torch.int) -> !torch.list<int>
%3 = torch.prim.ListConstruct %int1, %int1 : (!torch.int, !torch.int) -> !torch.list<int>
%4 = torch.prim.ListConstruct %int1, %int1 : (!torch.int, !torch.int) -> !torch.list<int>
%5 = torch.prim.ListConstruct %int0, %int0 : (!torch.int, !torch.int) -> !torch.list<int>
%6 = torch.shape.calculate {
%7 = torch.aten.convolution %arg0, %0, %1, %4, %2, %3, %false, %5, %int1 : !torch.vtensor<[1,128,4,256],f32>, !torch.vtensor<[257,128,1,1],f32>, !torch.vtensor<[257],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,257,4,256],f32>
torch.shape.calculate.yield %7 : !torch.vtensor<[1,257,4,256],f32>
} shapes {
%7 = torch.prim.ListConstruct %int1, %int128, %int4, %int256 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%8 = torch.prim.ListConstruct %int257, %int128, %int1, %int1 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%9 = torch.prim.ListConstruct : () -> !torch.list<int>
%10 = torch.aten.append.t %9, %int1 : !torch.list<int>, !torch.int -> !torch.list<int>
%11 = torch.aten.append.t %9, %int257 : !torch.list<int>, !torch.int -> !torch.list<int>
torch.prim.Loop %int2, %true, init() {
^bb0(%arg1: !torch.int):
%12 = torch.aten.__derive_index %arg1, %int2, %int1 : !torch.int, !torch.int, !torch.int -> !torch.int
%13 = torch.aten.sub.int %12, %int2 : !torch.int, !torch.int -> !torch.int
%14 = torch.aten.__getitem__.t %3, %13 : !torch.list<int>, !torch.int -> !torch.int
%15 = torch.aten.sub.int %12, %int2 : !torch.int, !torch.int -> !torch.int
%16 = torch.aten.__getitem__.t %5, %15 : !torch.list<int>, !torch.int -> !torch.int
%17 = torch.aten.__getitem__.t %8, %12 : !torch.list<int>, !torch.int -> !torch.int
%18 = torch.aten.sub.int %17, %int1 : !torch.int, !torch.int -> !torch.int
%19 = torch.aten.mul.int %14, %18 : !torch.int, !torch.int -> !torch.int
%20 = torch.aten.add.int %19, %int1 : !torch.int, !torch.int -> !torch.int
%21 = torch.aten.__getitem__.t %7, %12 : !torch.list<int>, !torch.int -> !torch.int
%22 = torch.aten.sub.int %12, %int2 : !torch.int, !torch.int -> !torch.int
%23 = torch.aten.__getitem__.t %2, %22 : !torch.list<int>, !torch.int -> !torch.int
%24 = torch.aten.mul.int %23, %int2 : !torch.int, !torch.int -> !torch.int
%25 = torch.aten.add.int %21, %24 : !torch.int, !torch.int -> !torch.int
%26 = torch.aten.sub.int %25, %20 : !torch.int, !torch.int -> !torch.int
%27 = torch.aten.sub.int %12, %int2 : !torch.int, !torch.int -> !torch.int
%28 = torch.aten.__getitem__.t %4, %27 : !torch.list<int>, !torch.int -> !torch.int
%29 = torch.aten.floordiv.int %26, %28 : !torch.int, !torch.int -> !torch.int
%30 = torch.aten.add.int %29, %int1 : !torch.int, !torch.int -> !torch.int
%31 = torch.aten.append.t %9, %30 : !torch.list<int>, !torch.int -> !torch.list<int>
torch.prim.Loop.condition %true, iter()
} : (!torch.int, !torch.bool) -> ()
torch.shape.calculate.yield.shapes %9 : !torch.list<int>
} : !torch.vtensor<[1,257,4,256],f32>
return %6 : !torch.vtensor<[1,257,4,256],f32>
}
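After canonicalization the generic shape function has been specialized to this graph: the shapes region above is just the loop over the two spatial dimensions with all the constant lists inlined. In Python terms (constants taken from the ListConstruct ops above; names are mine):

in_sizes = [1, 128, 4, 256]    # %7
w_sizes  = [257, 128, 1, 1]    # %8
stride   = [1, 1]              # %4
padding  = [0, 0]              # %2
dilation = [1, 1]              # %3

out = [1, 257]                 # batch and out_channels, appended up front
for d in range(2, 4):          # torch.prim.Loop over the two spatial dims
    k = dilation[d - 2] * (w_sizes[d] - 1) + 1
    out.append((in_sizes[d] + 2 * padding[d - 2] - k) // stride[d - 2] + 1)

print(out)                     # [1, 257, 4, 256]
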
// -----// IR Dump After Inliner (inline) //----- //
module {
func.func @torch_jit(%arg0: !torch.vtensor<[1,128,4,256],f32>) -> !torch.vtensor<[1,257,4,256],f32> attributes {torch.onnx_meta.ir_version = 7 : si64, torch.onnx_meta.opset_version = 21 : si64, torch.onnx_meta.producer_name = "pytorch", torch.onnx_meta.producer_version = "1.12.1"} {
%int2 = torch.constant.int 2
%true = torch.constant.bool true
%int257 = torch.constant.int 257
%int256 = torch.constant.int 256
%int4 = torch.constant.int 4
%int128 = torch.constant.int 128
%false = torch.constant.bool false
%int1 = torch.constant.int 1
%0 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>) : !torch.vtensor<[257,128,1,1],f32>
%1 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>) : !torch.vtensor<[257],f32>
%int0 = torch.constant.int 0
%2 = torch.prim.ListConstruct %int0, %int0 : (!torch.int, !torch.int) -> !torch.list<int>
%3 = torch.prim.ListConstruct %int1, %int1 : (!torch.int, !torch.int) -> !torch.list<int>
%4 = torch.prim.ListConstruct %int1, %int1 : (!torch.int, !torch.int) -> !torch.list<int>
%5 = torch.prim.ListConstruct %int0, %int0 : (!torch.int, !torch.int) -> !torch.list<int>
%6 = torch.shape.calculate {
%7 = torch.aten.convolution %arg0, %0, %1, %4, %2, %3, %false, %5, %int1 : !torch.vtensor<[1,128,4,256],f32>, !torch.vtensor<[257,128,1,1],f32>, !torch.vtensor<[257],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,257,4,256],f32>
torch.shape.calculate.yield %7 : !torch.vtensor<[1,257,4,256],f32>
} shapes {
%7 = torch.prim.ListConstruct %int1, %int128, %int4, %int256 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%8 = torch.prim.ListConstruct %int257, %int128, %int1, %int1 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%9 = torch.prim.ListConstruct : () -> !torch.list<int>
%10 = torch.aten.append.t %9, %int1 : !torch.list<int>, !torch.int -> !torch.list<int>
%11 = torch.aten.append.t %9, %int257 : !torch.list<int>, !torch.int -> !torch.list<int>
torch.prim.Loop %int2, %true, init() {
^bb0(%arg1: !torch.int):
%12 = torch.aten.__derive_index %arg1, %int2, %int1 : !torch.int, !torch.int, !torch.int -> !torch.int
%13 = torch.aten.sub.int %12, %int2 : !torch.int, !torch.int -> !torch.int
%14 = torch.aten.__getitem__.t %3, %13 : !torch.list<int>, !torch.int -> !torch.int
%15 = torch.aten.sub.int %12, %int2 : !torch.int, !torch.int -> !torch.int
%16 = torch.aten.__getitem__.t %5, %15 : !torch.list<int>, !torch.int -> !torch.int
%17 = torch.aten.__getitem__.t %8, %12 : !torch.list<int>, !torch.int -> !torch.int
%18 = torch.aten.sub.int %17, %int1 : !torch.int, !torch.int -> !torch.int
%19 = torch.aten.mul.int %14, %18 : !torch.int, !torch.int -> !torch.int
%20 = torch.aten.add.int %19, %int1 : !torch.int, !torch.int -> !torch.int
%21 = torch.aten.__getitem__.t %7, %12 : !torch.list<int>, !torch.int -> !torch.int
%22 = torch.aten.sub.int %12, %int2 : !torch.int, !torch.int -> !torch.int
%23 = torch.aten.__getitem__.t %2, %22 : !torch.list<int>, !torch.int -> !torch.int
%24 = torch.aten.mul.int %23, %int2 : !torch.int, !torch.int -> !torch.int
%25 = torch.aten.add.int %21, %24 : !torch.int, !torch.int -> !torch.int
%26 = torch.aten.sub.int %25, %20 : !torch.int, !torch.int -> !torch.int
%27 = torch.aten.sub.int %12, %int2 : !torch.int, !torch.int -> !torch.int
%28 = torch.aten.__getitem__.t %4, %27 : !torch.list<int>, !torch.int -> !torch.int
%29 = torch.aten.floordiv.int %26, %28 : !torch.int, !torch.int -> !torch.int
%30 = torch.aten.add.int %29, %int1 : !torch.int, !torch.int -> !torch.int
%31 = torch.aten.append.t %9, %30 : !torch.list<int>, !torch.int -> !torch.list<int>
torch.prim.Loop.condition %true, iter()
} : (!torch.int, !torch.bool) -> ()
torch.shape.calculate.yield.shapes %9 : !torch.list<int>
} : !torch.vtensor<[1,257,4,256],f32>
return %6 : !torch.vtensor<[1,257,4,256],f32>
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
// -----// IR Dump After SimplifyShapeCalculations (torch-simplify-shape-calculations) //----- //
func.func @torch_jit(%arg0: !torch.vtensor<[1,128,4,256],f32>) -> !torch.vtensor<[1,257,4,256],f32> attributes {torch.onnx_meta.ir_version = 7 : si64, torch.onnx_meta.opset_version = 21 : si64, torch.onnx_meta.producer_name = "pytorch", torch.onnx_meta.producer_version = "1.12.1"} {
%int256 = torch.constant.int 256
%int4 = torch.constant.int 4
%int257 = torch.constant.int 257
%false = torch.constant.bool false
%int1 = torch.constant.int 1
%0 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>) : !torch.vtensor<[257,128,1,1],f32>
%1 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>) : !torch.vtensor<[257],f32>
%int0 = torch.constant.int 0
%2 = torch.prim.ListConstruct %int0, %int0 : (!torch.int, !torch.int) -> !torch.list<int>
%3 = torch.prim.ListConstruct %int1, %int1 : (!torch.int, !torch.int) -> !torch.list<int>
%4 = torch.prim.ListConstruct %int1, %int1 : (!torch.int, !torch.int) -> !torch.list<int>
%5 = torch.prim.ListConstruct %int0, %int0 : (!torch.int, !torch.int) -> !torch.list<int>
%6 = torch.shape.calculate {
%7 = torch.aten.convolution %arg0, %0, %1, %4, %2, %3, %false, %5, %int1 : !torch.vtensor<[1,128,4,256],f32>, !torch.vtensor<[257,128,1,1],f32>, !torch.vtensor<[257],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,257,4,256],f32>
torch.shape.calculate.yield %7 : !torch.vtensor<[1,257,4,256],f32>
} shapes {
%7 = torch.prim.ListConstruct %int1, %int257, %int4, %int256 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
torch.shape.calculate.yield.shapes %7 : !torch.list<int>
} : !torch.vtensor<[1,257,4,256],f32>
return %6 : !torch.vtensor<[1,257,4,256],f32>
}
// -----// IR Dump After CSE (cse) //----- //
func.func @torch_jit(%arg0: !torch.vtensor<[1,128,4,256],f32>) -> !torch.vtensor<[1,257,4,256],f32> attributes {torch.onnx_meta.ir_version = 7 : si64, torch.onnx_meta.opset_version = 21 : si64, torch.onnx_meta.producer_name = "pytorch", torch.onnx_meta.producer_version = "1.12.1"} {
%int256 = torch.constant.int 256
%int4 = torch.constant.int 4
%int257 = torch.constant.int 257
%false = torch.constant.bool false
%int1 = torch.constant.int 1
%0 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>) : !torch.vtensor<[257,128,1,1],f32>
%1 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>) : !torch.vtensor<[257],f32>
%int0 = torch.constant.int 0
%2 = torch.prim.ListConstruct %int0, %int0 : (!torch.int, !torch.int) -> !torch.list<int>
%3 = torch.prim.ListConstruct %int1, %int1 : (!torch.int, !torch.int) -> !torch.list<int>
%4 = torch.shape.calculate {
%5 = torch.aten.convolution %arg0, %0, %1, %3, %2, %3, %false, %2, %int1 : !torch.vtensor<[1,128,4,256],f32>, !torch.vtensor<[257,128,1,1],f32>, !torch.vtensor<[257],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,257,4,256],f32>
torch.shape.calculate.yield %5 : !torch.vtensor<[1,257,4,256],f32>
} shapes {
%5 = torch.prim.ListConstruct %int1, %int257, %int4, %int256 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
torch.shape.calculate.yield.shapes %5 : !torch.list<int>
} : !torch.vtensor<[1,257,4,256],f32>
return %4 : !torch.vtensor<[1,257,4,256],f32>
}
// -----// IR Dump After SimplifyShapeCalculations (torch-simplify-shape-calculations) //----- //
func.func @torch_jit(%arg0: !torch.vtensor<[1,128,4,256],f32>) -> !torch.vtensor<[1,257,4,256],f32> attributes {torch.onnx_meta.ir_version = 7 : si64, torch.onnx_meta.opset_version = 21 : si64, torch.onnx_meta.producer_name = "pytorch", torch.onnx_meta.producer_version = "1.12.1"} {
%int256 = torch.constant.int 256
%int4 = torch.constant.int 4
%int257 = torch.constant.int 257
%false = torch.constant.bool false
%int1 = torch.constant.int 1
%0 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>) : !torch.vtensor<[257,128,1,1],f32>
%1 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>) : !torch.vtensor<[257],f32>
%int0 = torch.constant.int 0
%2 = torch.prim.ListConstruct %int0, %int0 : (!torch.int, !torch.int) -> !torch.list<int>
%3 = torch.prim.ListConstruct %int1, %int1 : (!torch.int, !torch.int) -> !torch.list<int>
%4 = torch.shape.calculate {
%5 = torch.aten.convolution %arg0, %0, %1, %3, %2, %3, %false, %2, %int1 : !torch.vtensor<[1,128,4,256],f32>, !torch.vtensor<[257,128,1,1],f32>, !torch.vtensor<[257],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,257,4,256],f32>
torch.shape.calculate.yield %5 : !torch.vtensor<[1,257,4,256],f32>
} shapes {
%5 = torch.prim.ListConstruct %int1, %int257, %int4, %int256 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
torch.shape.calculate.yield.shapes %5 : !torch.list<int>
} : !torch.vtensor<[1,257,4,256],f32>
return %4 : !torch.vtensor<[1,257,4,256],f32>
}
// -----// IR Dump After DropAbstractInterpCalculations (torch-drop-abstract-interp-calculations) //----- //
func.func @torch_jit(%arg0: !torch.vtensor<[1,128,4,256],f32>) -> !torch.vtensor<[1,257,4,256],f32> attributes {torch.onnx_meta.ir_version = 7 : si64, torch.onnx_meta.opset_version = 21 : si64, torch.onnx_meta.producer_name = "pytorch", torch.onnx_meta.producer_version = "1.12.1"} {
%int256 = torch.constant.int 256
%int4 = torch.constant.int 4
%int257 = torch.constant.int 257
%false = torch.constant.bool false
%int1 = torch.constant.int 1
%0 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>) : !torch.vtensor<[257,128,1,1],f32>
%1 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>) : !torch.vtensor<[257],f32>
%int0 = torch.constant.int 0
%2 = torch.prim.ListConstruct %int0, %int0 : (!torch.int, !torch.int) -> !torch.list<int>
%3 = torch.prim.ListConstruct %int1, %int1 : (!torch.int, !torch.int) -> !torch.list<int>
%4 = torch.aten.convolution %arg0, %0, %1, %3, %2, %3, %false, %2, %int1 : !torch.vtensor<[1,128,4,256],f32>, !torch.vtensor<[257,128,1,1],f32>, !torch.vtensor<[257],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,257,4,256],f32>
return %4 : !torch.vtensor<[1,257,4,256],f32>
}
// -----// IR Dump After ScalarizeShapes (torch-scalarize-shapes) //----- //
func.func @torch_jit(%arg0: !torch.vtensor<[1,128,4,256],f32>) -> !torch.vtensor<[1,257,4,256],f32> attributes {torch.onnx_meta.ir_version = 7 : si64, torch.onnx_meta.opset_version = 21 : si64, torch.onnx_meta.producer_name = "pytorch", torch.onnx_meta.producer_version = "1.12.1"} {
%int256 = torch.constant.int 256
%int4 = torch.constant.int 4
%int257 = torch.constant.int 257
%false = torch.constant.bool false
%int1 = torch.constant.int 1
%0 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>) : !torch.vtensor<[257,128,1,1],f32>
%1 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>) : !torch.vtensor<[257],f32>
%int0 = torch.constant.int 0
%2 = torch.prim.ListConstruct %int0, %int0 : (!torch.int, !torch.int) -> !torch.list<int>
%3 = torch.prim.ListConstruct %int1, %int1 : (!torch.int, !torch.int) -> !torch.list<int>
%4 = torch.aten.convolution %arg0, %0, %1, %3, %2, %3, %false, %2, %int1 : !torch.vtensor<[1,128,4,256],f32>, !torch.vtensor<[257,128,1,1],f32>, !torch.vtensor<[257],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,257,4,256],f32>
return %4 : !torch.vtensor<[1,257,4,256],f32>
}
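At this point the function is back to a single statically shaped torch.aten.convolution. Its operand order is (input, weight, bias, stride, padding, dilation, transposed, output_padding, groups); after CSE the stride and dilation lists share %3 and the padding and output_padding lists share %2. A hedged functional equivalent (the weight and bias values were elided from the gist, so random tensors stand in for them):

import torch
import torch.nn.functional as F

x      = torch.randn(1, 128, 4, 256)   # %arg0
weight = torch.randn(257, 128, 1, 1)   # %0 (elided dense_resource)
bias   = torch.randn(257)              # %1 (elided dense_resource)

# stride = dilation = %3 = [1, 1]; padding = %2 = [0, 0]; groups = %int1 = 1.
# transposed = false and output_padding are not needed for a forward conv2d call.
y = F.conv2d(x, weight, bias, stride=(1, 1), padding=(0, 0), dilation=(1, 1), groups=1)
print(y.shape)                          # torch.Size([1, 257, 4, 256])
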
// -----// IR Dump After ReifyShapeCalculations (torch-reify-shape-calculations) //----- //
module {
func.func private @__torch__.torch.jit._shape_functions.conv_forwards(%arg0: !torch.list<int>, %arg1: !torch.list<int>, %arg2: !torch.optional<list<int>>, %arg3: !torch.list<int>, %arg4: !torch.list<int>, %arg5: !torch.list<int>, %arg6: !torch.bool, %arg7: !torch.list<int>, %arg8: !torch.int) -> !torch.list<int> {
%true = torch.constant.bool true
%int0 = torch.constant.int 0
%int1 = torch.constant.int 1
%int2 = torch.constant.int 2
%0 = torch.aten.len.t %arg5 : !torch.list<int> -> !torch.int
%1 = torch.aten.gt.int %0, %int0 : !torch.int, !torch.int -> !torch.bool
%2 = torch.aten.len.t %arg7 : !torch.list<int> -> !torch.int
%3 = torch.aten.gt.int %2, %int0 : !torch.int, !torch.int -> !torch.bool
%4 = torch.aten.len.t %arg0 : !torch.list<int> -> !torch.int
%5 = torch.prim.ListConstruct : () -> !torch.list<int>
%6 = torch.prim.If %arg6 -> (!torch.int) {
torch.prim.If.yield %int1 : !torch.int
} else {
torch.prim.If.yield %int0 : !torch.int
}
%7 = torch.aten.__getitem__.t %arg0, %int0 : !torch.list<int>, !torch.int -> !torch.int
%8 = torch.aten.append.t %5, %7 : !torch.list<int>, !torch.int -> !torch.list<int>
torch.prim.If %arg6 -> () {
%10 = torch.aten.__getitem__.t %arg1, %6 : !torch.list<int>, !torch.int -> !torch.int
%11 = torch.aten.mul.int %10, %arg8 : !torch.int, !torch.int -> !torch.int
%12 = torch.aten.append.t %5, %11 : !torch.list<int>, !torch.int -> !torch.list<int>
torch.prim.If.yield
} else {
%10 = torch.aten.__getitem__.t %arg1, %6 : !torch.list<int>, !torch.int -> !torch.int
%11 = torch.aten.append.t %5, %10 : !torch.list<int>, !torch.int -> !torch.list<int>
torch.prim.If.yield
}
%9 = torch.aten.__range_length %int2, %4, %int1 : !torch.int, !torch.int, !torch.int -> !torch.int
torch.prim.Loop %9, %true, init() {
^bb0(%arg9: !torch.int):
%10 = torch.aten.__derive_index %arg9, %int2, %int1 : !torch.int, !torch.int, !torch.int -> !torch.int
%11 = torch.prim.If %1 -> (!torch.int) {
%13 = torch.aten.sub.int %10, %int2 : !torch.int, !torch.int -> !torch.int
%14 = torch.aten.__getitem__.t %arg5, %13 : !torch.list<int>, !torch.int -> !torch.int
torch.prim.If.yield %14 : !torch.int
} else {
torch.prim.If.yield %int1 : !torch.int
}
%12 = torch.prim.If %3 -> (!torch.int) {
%13 = torch.aten.sub.int %10, %int2 : !torch.int, !torch.int -> !torch.int
%14 = torch.aten.__getitem__.t %arg7, %13 : !torch.list<int>, !torch.int -> !torch.int
torch.prim.If.yield %14 : !torch.int
} else {
torch.prim.If.yield %int0 : !torch.int
}
torch.prim.If %arg6 -> () {
%13 = torch.aten.__getitem__.t %arg1, %10 : !torch.list<int>, !torch.int -> !torch.int
%14 = torch.aten.sub.int %13, %int1 : !torch.int, !torch.int -> !torch.int
%15 = torch.aten.mul.int %11, %14 : !torch.int, !torch.int -> !torch.int
%16 = torch.aten.__getitem__.t %arg0, %10 : !torch.list<int>, !torch.int -> !torch.int
%17 = torch.aten.sub.int %16, %int1 : !torch.int, !torch.int -> !torch.int
%18 = torch.aten.sub.int %10, %int2 : !torch.int, !torch.int -> !torch.int
%19 = torch.aten.__getitem__.t %arg3, %18 : !torch.list<int>, !torch.int -> !torch.int
%20 = torch.aten.mul.int %17, %19 : !torch.int, !torch.int -> !torch.int
%21 = torch.aten.sub.int %10, %int2 : !torch.int, !torch.int -> !torch.int
%22 = torch.aten.__getitem__.t %arg4, %21 : !torch.list<int>, !torch.int -> !torch.int
%23 = torch.aten.mul.int %22, %int2 : !torch.int, !torch.int -> !torch.int
%24 = torch.aten.sub.int %20, %23 : !torch.int, !torch.int -> !torch.int
%25 = torch.aten.add.int %24, %15 : !torch.int, !torch.int -> !torch.int
%26 = torch.aten.add.int %25, %12 : !torch.int, !torch.int -> !torch.int
%27 = torch.aten.add.int %26, %int1 : !torch.int, !torch.int -> !torch.int
%28 = torch.aten.append.t %5, %27 : !torch.list<int>, !torch.int -> !torch.list<int>
torch.prim.If.yield
} else {
%13 = torch.aten.__getitem__.t %arg1, %10 : !torch.list<int>, !torch.int -> !torch.int
%14 = torch.aten.sub.int %13, %int1 : !torch.int, !torch.int -> !torch.int
%15 = torch.aten.mul.int %11, %14 : !torch.int, !torch.int -> !torch.int
%16 = torch.aten.add.int %15, %int1 : !torch.int, !torch.int -> !torch.int
%17 = torch.aten.__getitem__.t %arg0, %10 : !torch.list<int>, !torch.int -> !torch.int
%18 = torch.aten.sub.int %10, %int2 : !torch.int, !torch.int -> !torch.int
%19 = torch.aten.__getitem__.t %arg4, %18 : !torch.list<int>, !torch.int -> !torch.int
%20 = torch.aten.mul.int %19, %int2 : !torch.int, !torch.int -> !torch.int
%21 = torch.aten.add.int %17, %20 : !torch.int, !torch.int -> !torch.int
%22 = torch.aten.sub.int %21, %16 : !torch.int, !torch.int -> !torch.int
%23 = torch.aten.sub.int %10, %int2 : !torch.int, !torch.int -> !torch.int
%24 = torch.aten.__getitem__.t %arg3, %23 : !torch.list<int>, !torch.int -> !torch.int
%25 = torch.aten.floordiv.int %22, %24 : !torch.int, !torch.int -> !torch.int
%26 = torch.aten.add.int %25, %int1 : !torch.int, !torch.int -> !torch.int
%27 = torch.aten.append.t %5, %26 : !torch.list<int>, !torch.int -> !torch.list<int>
torch.prim.If.yield
}
torch.prim.Loop.condition %true, iter()
} : (!torch.int, !torch.bool) -> ()
return %5 : !torch.list<int>
}
func.func private @__torch_mlir_shape_fn.aten.convolution(%arg0: !torch.list<int>, %arg1: !torch.list<int>, %arg2: !torch.optional<list<int>>, %arg3: !torch.list<int>, %arg4: !torch.list<int>, %arg5: !torch.list<int>, %arg6: !torch.bool, %arg7: !torch.list<int>, %arg8: !torch.int) -> !torch.list<int> {
%0 = call @__torch__.torch.jit._shape_functions.conv_forwards(%arg0, %arg1, %arg2, %arg3, %arg4, %arg5, %arg6, %arg7, %arg8) : (!torch.list<int>, !torch.list<int>, !torch.optional<list<int>>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int) -> !torch.list<int>
return %0 : !torch.list<int>
}
func.func @torch_jit(%arg0: !torch.vtensor<[1,128,4,256],f32>) -> !torch.vtensor<[1,257,4,256],f32> attributes {torch.onnx_meta.ir_version = 7 : si64, torch.onnx_meta.opset_version = 21 : si64, torch.onnx_meta.producer_name = "pytorch", torch.onnx_meta.producer_version = "1.12.1"} {
%int256 = torch.constant.int 256
%int4 = torch.constant.int 4
%int257 = torch.constant.int 257
%false = torch.constant.bool false
%int1 = torch.constant.int 1
%0 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>) : !torch.vtensor<[257,128,1,1],f32>
%1 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>) : !torch.vtensor<[257],f32>
%int0 = torch.constant.int 0
%2 = torch.prim.ListConstruct %int0, %int0 : (!torch.int, !torch.int) -> !torch.list<int>
%3 = torch.prim.ListConstruct %int1, %int1 : (!torch.int, !torch.int) -> !torch.list<int>
%4 = torch.shape.calculate {
%5 = torch.aten.convolution %arg0, %0, %1, %3, %2, %3, %false, %2, %int1 : !torch.vtensor<[1,128,4,256],f32>, !torch.vtensor<[257,128,1,1],f32>, !torch.vtensor<[257],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,257,4,256],f32>
torch.shape.calculate.yield %5 : !torch.vtensor<[1,257,4,256],f32>
} shapes {
%5 = torch.aten.size %arg0 : !torch.vtensor<[1,128,4,256],f32> -> !torch.list<int>
%6 = torch.aten.size %0 : !torch.vtensor<[257,128,1,1],f32> -> !torch.list<int>
%7 = torch.aten.size %1 : !torch.vtensor<[257],f32> -> !torch.list<int>
%8 = torch.derefine %7 : !torch.list<int> to !torch.optional<list<int>>
%9 = func.call @__torch_mlir_shape_fn.aten.convolution(%5, %6, %8, %3, %2, %3, %false, %2, %int1) : (!torch.list<int>, !torch.list<int>, !torch.optional<list<int>>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int) -> !torch.list<int>
torch.shape.calculate.yield.shapes %9 : !torch.list<int>
} : !torch.vtensor<[1,257,4,256],f32>
return %4 : !torch.vtensor<[1,257,4,256],f32>
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
func.func private @__torch__.torch.jit._shape_functions.conv_forwards(%arg0: !torch.list<int>, %arg1: !torch.list<int>, %arg2: !torch.optional<list<int>>, %arg3: !torch.list<int>, %arg4: !torch.list<int>, %arg5: !torch.list<int>, %arg6: !torch.bool, %arg7: !torch.list<int>, %arg8: !torch.int) -> !torch.list<int> {
%true = torch.constant.bool true
%int0 = torch.constant.int 0
%int1 = torch.constant.int 1
%int2 = torch.constant.int 2
%0 = torch.aten.len.t %arg5 : !torch.list<int> -> !torch.int
%1 = torch.aten.gt.int %0, %int0 : !torch.int, !torch.int -> !torch.bool
%2 = torch.aten.len.t %arg7 : !torch.list<int> -> !torch.int
%3 = torch.aten.gt.int %2, %int0 : !torch.int, !torch.int -> !torch.bool
%4 = torch.aten.len.t %arg0 : !torch.list<int> -> !torch.int
%5 = torch.prim.ListConstruct : () -> !torch.list<int>
%6 = torch.prim.If %arg6 -> (!torch.int) {
torch.prim.If.yield %int1 : !torch.int
} else {
torch.prim.If.yield %int0 : !torch.int
}
%7 = torch.aten.__getitem__.t %arg0, %int0 : !torch.list<int>, !torch.int -> !torch.int
%8 = torch.aten.append.t %5, %7 : !torch.list<int>, !torch.int -> !torch.list<int>
torch.prim.If %arg6 -> () {
%10 = torch.aten.__getitem__.t %arg1, %6 : !torch.list<int>, !torch.int -> !torch.int
%11 = torch.aten.mul.int %10, %arg8 : !torch.int, !torch.int -> !torch.int
%12 = torch.aten.append.t %5, %11 : !torch.list<int>, !torch.int -> !torch.list<int>
torch.prim.If.yield
} else {
%10 = torch.aten.__getitem__.t %arg1, %6 : !torch.list<int>, !torch.int -> !torch.int
%11 = torch.aten.append.t %5, %10 : !torch.list<int>, !torch.int -> !torch.list<int>
torch.prim.If.yield
}
%9 = torch.aten.__range_length %int2, %4, %int1 : !torch.int, !torch.int, !torch.int -> !torch.int
torch.prim.Loop %9, %true, init() {
^bb0(%arg9: !torch.int):
%10 = torch.aten.__derive_index %arg9, %int2, %int1 : !torch.int, !torch.int, !torch.int -> !torch.int
%11 = torch.prim.If %1 -> (!torch.int) {
%13 = torch.aten.sub.int %10, %int2 : !torch.int, !torch.int -> !torch.int
%14 = torch.aten.__getitem__.t %arg5, %13 : !torch.list<int>, !torch.int -> !torch.int
torch.prim.If.yield %14 : !torch.int
} else {
torch.prim.If.yield %int1 : !torch.int
}
%12 = torch.prim.If %3 -> (!torch.int) {
%13 = torch.aten.sub.int %10, %int2 : !torch.int, !torch.int -> !torch.int
%14 = torch.aten.__getitem__.t %arg7, %13 : !torch.list<int>, !torch.int -> !torch.int
torch.prim.If.yield %14 : !torch.int
} else {
torch.prim.If.yield %int0 : !torch.int
}
torch.prim.If %arg6 -> () {
%13 = torch.aten.__getitem__.t %arg1, %10 : !torch.list<int>, !torch.int -> !torch.int
%14 = torch.aten.sub.int %13, %int1 : !torch.int, !torch.int -> !torch.int
%15 = torch.aten.mul.int %11, %14 : !torch.int, !torch.int -> !torch.int
%16 = torch.aten.__getitem__.t %arg0, %10 : !torch.list<int>, !torch.int -> !torch.int
%17 = torch.aten.sub.int %16, %int1 : !torch.int, !torch.int -> !torch.int
%18 = torch.aten.sub.int %10, %int2 : !torch.int, !torch.int -> !torch.int
%19 = torch.aten.__getitem__.t %arg3, %18 : !torch.list<int>, !torch.int -> !torch.int
%20 = torch.aten.mul.int %17, %19 : !torch.int, !torch.int -> !torch.int
%21 = torch.aten.sub.int %10, %int2 : !torch.int, !torch.int -> !torch.int
%22 = torch.aten.__getitem__.t %arg4, %21 : !torch.list<int>, !torch.int -> !torch.int
%23 = torch.aten.mul.int %22, %int2 : !torch.int, !torch.int -> !torch.int
%24 = torch.aten.sub.int %20, %23 : !torch.int, !torch.int -> !torch.int
%25 = torch.aten.add.int %24, %15 : !torch.int, !torch.int -> !torch.int
%26 = torch.aten.add.int %25, %12 : !torch.int, !torch.int -> !torch.int
%27 = torch.aten.add.int %26, %int1 : !torch.int, !torch.int -> !torch.int
%28 = torch.aten.append.t %5, %27 : !torch.list<int>, !torch.int -> !torch.list<int>
torch.prim.If.yield
} else {
%13 = torch.aten.__getitem__.t %arg1, %10 : !torch.list<int>, !torch.int -> !torch.int
%14 = torch.aten.sub.int %13, %int1 : !torch.int, !torch.int -> !torch.int
%15 = torch.aten.mul.int %11, %14 : !torch.int, !torch.int -> !torch.int
%16 = torch.aten.add.int %15, %int1 : !torch.int, !torch.int -> !torch.int
%17 = torch.aten.__getitem__.t %arg0, %10 : !torch.list<int>, !torch.int -> !torch.int
%18 = torch.aten.sub.int %10, %int2 : !torch.int, !torch.int -> !torch.int
%19 = torch.aten.__getitem__.t %arg4, %18 : !torch.list<int>, !torch.int -> !torch.int
%20 = torch.aten.mul.int %19, %int2 : !torch.int, !torch.int -> !torch.int
%21 = torch.aten.add.int %17, %20 : !torch.int, !torch.int -> !torch.int
%22 = torch.aten.sub.int %21, %16 : !torch.int, !torch.int -> !torch.int
%23 = torch.aten.sub.int %10, %int2 : !torch.int, !torch.int -> !torch.int
%24 = torch.aten.__getitem__.t %arg3, %23 : !torch.list<int>, !torch.int -> !torch.int
%25 = torch.aten.floordiv.int %22, %24 : !torch.int, !torch.int -> !torch.int
%26 = torch.aten.add.int %25, %int1 : !torch.int, !torch.int -> !torch.int
%27 = torch.aten.append.t %5, %26 : !torch.list<int>, !torch.int -> !torch.list<int>
torch.prim.If.yield
}
torch.prim.Loop.condition %true, iter()
} : (!torch.int, !torch.bool) -> ()
return %5 : !torch.list<int>
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
func.func private @__torch_mlir_shape_fn.aten.convolution(%arg0: !torch.list<int>, %arg1: !torch.list<int>, %arg2: !torch.optional<list<int>>, %arg3: !torch.list<int>, %arg4: !torch.list<int>, %arg5: !torch.list<int>, %arg6: !torch.bool, %arg7: !torch.list<int>, %arg8: !torch.int) -> !torch.list<int> {
%0 = call @__torch__.torch.jit._shape_functions.conv_forwards(%arg0, %arg1, %arg2, %arg3, %arg4, %arg5, %arg6, %arg7, %arg8) : (!torch.list<int>, !torch.list<int>, !torch.optional<list<int>>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int) -> !torch.list<int>
return %0 : !torch.list<int>
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
func.func private @__torch_mlir_shape_fn.aten.convolution(%arg0: !torch.list<int>, %arg1: !torch.list<int>, %arg2: !torch.optional<list<int>>, %arg3: !torch.list<int>, %arg4: !torch.list<int>, %arg5: !torch.list<int>, %arg6: !torch.bool, %arg7: !torch.list<int>, %arg8: !torch.int) -> !torch.list<int> {
%true = torch.constant.bool true
%int0 = torch.constant.int 0
%int1 = torch.constant.int 1
%int2 = torch.constant.int 2
%0 = torch.aten.len.t %arg5 : !torch.list<int> -> !torch.int
%1 = torch.aten.gt.int %0, %int0 : !torch.int, !torch.int -> !torch.bool
%2 = torch.aten.len.t %arg7 : !torch.list<int> -> !torch.int
%3 = torch.aten.gt.int %2, %int0 : !torch.int, !torch.int -> !torch.bool
%4 = torch.aten.len.t %arg0 : !torch.list<int> -> !torch.int
%5 = torch.prim.ListConstruct : () -> !torch.list<int>
%6 = torch.prim.If %arg6 -> (!torch.int) {
torch.prim.If.yield %int1 : !torch.int
} else {
torch.prim.If.yield %int0 : !torch.int
}
%7 = torch.aten.__getitem__.t %arg0, %int0 : !torch.list<int>, !torch.int -> !torch.int
%8 = torch.aten.append.t %5, %7 : !torch.list<int>, !torch.int -> !torch.list<int>
torch.prim.If %arg6 -> () {
%10 = torch.aten.__getitem__.t %arg1, %6 : !torch.list<int>, !torch.int -> !torch.int
%11 = torch.aten.mul.int %10, %arg8 : !torch.int, !torch.int -> !torch.int
%12 = torch.aten.append.t %5, %11 : !torch.list<int>, !torch.int -> !torch.list<int>
torch.prim.If.yield
} else {
%10 = torch.aten.__getitem__.t %arg1, %6 : !torch.list<int>, !torch.int -> !torch.int
%11 = torch.aten.append.t %5, %10 : !torch.list<int>, !torch.int -> !torch.list<int>
torch.prim.If.yield
}
%9 = torch.aten.__range_length %int2, %4, %int1 : !torch.int, !torch.int, !torch.int -> !torch.int
torch.prim.Loop %9, %true, init() {
^bb0(%arg9: !torch.int):
%10 = torch.aten.__derive_index %arg9, %int2, %int1 : !torch.int, !torch.int, !torch.int -> !torch.int
%11 = torch.prim.If %1 -> (!torch.int) {
%13 = torch.aten.sub.int %10, %int2 : !torch.int, !torch.int -> !torch.int
%14 = torch.aten.__getitem__.t %arg5, %13 : !torch.list<int>, !torch.int -> !torch.int
torch.prim.If.yield %14 : !torch.int
} else {
torch.prim.If.yield %int1 : !torch.int
}
%12 = torch.prim.If %3 -> (!torch.int) {
%13 = torch.aten.sub.int %10, %int2 : !torch.int, !torch.int -> !torch.int
%14 = torch.aten.__getitem__.t %arg7, %13 : !torch.list<int>, !torch.int -> !torch.int
torch.prim.If.yield %14 : !torch.int
} else {
torch.prim.If.yield %int0 : !torch.int
}
torch.prim.If %arg6 -> () {
%13 = torch.aten.__getitem__.t %arg1, %10 : !torch.list<int>, !torch.int -> !torch.int
%14 = torch.aten.sub.int %13, %int1 : !torch.int, !torch.int -> !torch.int
%15 = torch.aten.mul.int %11, %14 : !torch.int, !torch.int -> !torch.int
%16 = torch.aten.__getitem__.t %arg0, %10 : !torch.list<int>, !torch.int -> !torch.int
%17 = torch.aten.sub.int %16, %int1 : !torch.int, !torch.int -> !torch.int
%18 = torch.aten.sub.int %10, %int2 : !torch.int, !torch.int -> !torch.int
%19 = torch.aten.__getitem__.t %arg3, %18 : !torch.list<int>, !torch.int -> !torch.int
%20 = torch.aten.mul.int %17, %19 : !torch.int, !torch.int -> !torch.int
%21 = torch.aten.sub.int %10, %int2 : !torch.int, !torch.int -> !torch.int
%22 = torch.aten.__getitem__.t %arg4, %21 : !torch.list<int>, !torch.int -> !torch.int
%23 = torch.aten.mul.int %22, %int2 : !torch.int, !torch.int -> !torch.int
%24 = torch.aten.sub.int %20, %23 : !torch.int, !torch.int -> !torch.int
%25 = torch.aten.add.int %24, %15 : !torch.int, !torch.int -> !torch.int
%26 = torch.aten.add.int %25, %12 : !torch.int, !torch.int -> !torch.int
%27 = torch.aten.add.int %26, %int1 : !torch.int, !torch.int -> !torch.int
%28 = torch.aten.append.t %5, %27 : !torch.list<int>, !torch.int -> !torch.list<int>
torch.prim.If.yield
} else {
%13 = torch.aten.__getitem__.t %arg1, %10 : !torch.list<int>, !torch.int -> !torch.int
%14 = torch.aten.sub.int %13, %int1 : !torch.int, !torch.int -> !torch.int
%15 = torch.aten.mul.int %11, %14 : !torch.int, !torch.int -> !torch.int
%16 = torch.aten.add.int %15, %int1 : !torch.int, !torch.int -> !torch.int
%17 = torch.aten.__getitem__.t %arg0, %10 : !torch.list<int>, !torch.int -> !torch.int
%18 = torch.aten.sub.int %10, %int2 : !torch.int, !torch.int -> !torch.int
%19 = torch.aten.__getitem__.t %arg4, %18 : !torch.list<int>, !torch.int -> !torch.int
%20 = torch.aten.mul.int %19, %int2 : !torch.int, !torch.int -> !torch.int
%21 = torch.aten.add.int %17, %20 : !torch.int, !torch.int -> !torch.int
%22 = torch.aten.sub.int %21, %16 : !torch.int, !torch.int -> !torch.int
%23 = torch.aten.sub.int %10, %int2 : !torch.int, !torch.int -> !torch.int
%24 = torch.aten.__getitem__.t %arg3, %23 : !torch.list<int>, !torch.int -> !torch.int
%25 = torch.aten.floordiv.int %22, %24 : !torch.int, !torch.int -> !torch.int
%26 = torch.aten.add.int %25, %int1 : !torch.int, !torch.int -> !torch.int
%27 = torch.aten.append.t %5, %26 : !torch.list<int>, !torch.int -> !torch.list<int>
torch.prim.If.yield
}
torch.prim.Loop.condition %true, iter()
} : (!torch.int, !torch.bool) -> ()
return %5 : !torch.list<int>
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
func.func @torch_jit(%arg0: !torch.vtensor<[1,128,4,256],f32>) -> !torch.vtensor<[1,257,4,256],f32> attributes {torch.onnx_meta.ir_version = 7 : si64, torch.onnx_meta.opset_version = 21 : si64, torch.onnx_meta.producer_name = "pytorch", torch.onnx_meta.producer_version = "1.12.1"} {
%int257 = torch.constant.int 257
%int256 = torch.constant.int 256
%int4 = torch.constant.int 4
%int128 = torch.constant.int 128
%false = torch.constant.bool false
%int1 = torch.constant.int 1
%0 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>) : !torch.vtensor<[257,128,1,1],f32>
%1 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>) : !torch.vtensor<[257],f32>
%int0 = torch.constant.int 0
%2 = torch.prim.ListConstruct %int0, %int0 : (!torch.int, !torch.int) -> !torch.list<int>
%3 = torch.prim.ListConstruct %int1, %int1 : (!torch.int, !torch.int) -> !torch.list<int>
%4 = torch.shape.calculate {
%5 = torch.aten.convolution %arg0, %0, %1, %3, %2, %3, %false, %2, %int1 : !torch.vtensor<[1,128,4,256],f32>, !torch.vtensor<[257,128,1,1],f32>, !torch.vtensor<[257],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,257,4,256],f32>
torch.shape.calculate.yield %5 : !torch.vtensor<[1,257,4,256],f32>
} shapes {
%5 = torch.prim.ListConstruct %int1, %int128, %int4, %int256 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%6 = torch.prim.ListConstruct %int257, %int128, %int1, %int1 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%7 = torch.prim.ListConstruct %int257 : (!torch.int) -> !torch.list<int>
%8 = torch.derefine %7 : !torch.list<int> to !torch.optional<list<int>>
%9 = func.call @__torch_mlir_shape_fn.aten.convolution(%5, %6, %8, %3, %2, %3, %false, %2, %int1) : (!torch.list<int>, !torch.list<int>, !torch.optional<list<int>>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int) -> !torch.list<int>
torch.shape.calculate.yield.shapes %9 : !torch.list<int>
} : !torch.vtensor<[1,257,4,256],f32>
return %4 : !torch.vtensor<[1,257,4,256],f32>
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
func.func @torch_jit(%arg0: !torch.vtensor<[1,128,4,256],f32>) -> !torch.vtensor<[1,257,4,256],f32> attributes {torch.onnx_meta.ir_version = 7 : si64, torch.onnx_meta.opset_version = 21 : si64, torch.onnx_meta.producer_name = "pytorch", torch.onnx_meta.producer_version = "1.12.1"} {
%int2 = torch.constant.int 2
%true = torch.constant.bool true
%int257 = torch.constant.int 257
%int256 = torch.constant.int 256
%int4 = torch.constant.int 4
%int128 = torch.constant.int 128
%false = torch.constant.bool false
%int1 = torch.constant.int 1
%0 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>) : !torch.vtensor<[257,128,1,1],f32>
%1 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>) : !torch.vtensor<[257],f32>
%int0 = torch.constant.int 0
%2 = torch.prim.ListConstruct %int0, %int0 : (!torch.int, !torch.int) -> !torch.list<int>
%3 = torch.prim.ListConstruct %int1, %int1 : (!torch.int, !torch.int) -> !torch.list<int>
%4 = torch.shape.calculate {
%5 = torch.aten.convolution %arg0, %0, %1, %3, %2, %3, %false, %2, %int1 : !torch.vtensor<[1,128,4,256],f32>, !torch.vtensor<[257,128,1,1],f32>, !torch.vtensor<[257],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,257,4,256],f32>
torch.shape.calculate.yield %5 : !torch.vtensor<[1,257,4,256],f32>
} shapes {
%5 = torch.prim.ListConstruct %int1, %int128, %int4, %int256 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%6 = torch.prim.ListConstruct %int257, %int128, %int1, %int1 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%7 = torch.prim.ListConstruct : () -> !torch.list<int>
%8 = torch.aten.append.t %7, %int1 : !torch.list<int>, !torch.int -> !torch.list<int>
%9 = torch.aten.append.t %7, %int257 : !torch.list<int>, !torch.int -> !torch.list<int>
torch.prim.Loop %int2, %true, init() {
^bb0(%arg1: !torch.int):
%10 = torch.aten.__derive_index %arg1, %int2, %int1 : !torch.int, !torch.int, !torch.int -> !torch.int
%11 = torch.aten.sub.int %10, %int2 : !torch.int, !torch.int -> !torch.int
%12 = torch.aten.__getitem__.t %3, %11 : !torch.list<int>, !torch.int -> !torch.int
%13 = torch.aten.sub.int %10, %int2 : !torch.int, !torch.int -> !torch.int
%14 = torch.aten.__getitem__.t %2, %13 : !torch.list<int>, !torch.int -> !torch.int
%15 = torch.aten.__getitem__.t %6, %10 : !torch.list<int>, !torch.int -> !torch.int
%16 = torch.aten.sub.int %15, %int1 : !torch.int, !torch.int -> !torch.int
%17 = torch.aten.mul.int %12, %16 : !torch.int, !torch.int -> !torch.int
%18 = torch.aten.add.int %17, %int1 : !torch.int, !torch.int -> !torch.int
%19 = torch.aten.__getitem__.t %5, %10 : !torch.list<int>, !torch.int -> !torch.int
%20 = torch.aten.sub.int %10, %int2 : !torch.int, !torch.int -> !torch.int
%21 = torch.aten.__getitem__.t %2, %20 : !torch.list<int>, !torch.int -> !torch.int
%22 = torch.aten.mul.int %21, %int2 : !torch.int, !torch.int -> !torch.int
%23 = torch.aten.add.int %19, %22 : !torch.int, !torch.int -> !torch.int
%24 = torch.aten.sub.int %23, %18 : !torch.int, !torch.int -> !torch.int
%25 = torch.aten.sub.int %10, %int2 : !torch.int, !torch.int -> !torch.int
%26 = torch.aten.__getitem__.t %3, %25 : !torch.list<int>, !torch.int -> !torch.int
%27 = torch.aten.floordiv.int %24, %26 : !torch.int, !torch.int -> !torch.int
%28 = torch.aten.add.int %27, %int1 : !torch.int, !torch.int -> !torch.int
%29 = torch.aten.append.t %7, %28 : !torch.list<int>, !torch.int -> !torch.list<int>
torch.prim.Loop.condition %true, iter()
} : (!torch.int, !torch.bool) -> ()
torch.shape.calculate.yield.shapes %7 : !torch.list<int>
} : !torch.vtensor<[1,257,4,256],f32>
return %4 : !torch.vtensor<[1,257,4,256],f32>
}
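// NOTE (reader annotation): after canonicalization the shape function has been inlined into the
// shapes region above and specialized to this call site: the transposed branch is gone, and the
// remaining loop runs over the two spatial dimensions, reading stride/dilation from %3 = [1, 1]
// and padding/output_padding from %2 = [0, 0].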
// -----// IR Dump After Inliner (inline) //----- //
module {
func.func @torch_jit(%arg0: !torch.vtensor<[1,128,4,256],f32>) -> !torch.vtensor<[1,257,4,256],f32> attributes {torch.onnx_meta.ir_version = 7 : si64, torch.onnx_meta.opset_version = 21 : si64, torch.onnx_meta.producer_name = "pytorch", torch.onnx_meta.producer_version = "1.12.1"} {
%int2 = torch.constant.int 2
%true = torch.constant.bool true
%int257 = torch.constant.int 257
%int256 = torch.constant.int 256
%int4 = torch.constant.int 4
%int128 = torch.constant.int 128
%false = torch.constant.bool false
%int1 = torch.constant.int 1
%0 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>) : !torch.vtensor<[257,128,1,1],f32>
%1 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>) : !torch.vtensor<[257],f32>
%int0 = torch.constant.int 0
%2 = torch.prim.ListConstruct %int0, %int0 : (!torch.int, !torch.int) -> !torch.list<int>
%3 = torch.prim.ListConstruct %int1, %int1 : (!torch.int, !torch.int) -> !torch.list<int>
%4 = torch.shape.calculate {
%5 = torch.aten.convolution %arg0, %0, %1, %3, %2, %3, %false, %2, %int1 : !torch.vtensor<[1,128,4,256],f32>, !torch.vtensor<[257,128,1,1],f32>, !torch.vtensor<[257],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,257,4,256],f32>
torch.shape.calculate.yield %5 : !torch.vtensor<[1,257,4,256],f32>
} shapes {
%5 = torch.prim.ListConstruct %int1, %int128, %int4, %int256 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%6 = torch.prim.ListConstruct %int257, %int128, %int1, %int1 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%7 = torch.prim.ListConstruct : () -> !torch.list<int>
%8 = torch.aten.append.t %7, %int1 : !torch.list<int>, !torch.int -> !torch.list<int>
%9 = torch.aten.append.t %7, %int257 : !torch.list<int>, !torch.int -> !torch.list<int>
torch.prim.Loop %int2, %true, init() {
^bb0(%arg1: !torch.int):
%10 = torch.aten.__derive_index %arg1, %int2, %int1 : !torch.int, !torch.int, !torch.int -> !torch.int
%11 = torch.aten.sub.int %10, %int2 : !torch.int, !torch.int -> !torch.int
%12 = torch.aten.__getitem__.t %3, %11 : !torch.list<int>, !torch.int -> !torch.int
%13 = torch.aten.sub.int %10, %int2 : !torch.int, !torch.int -> !torch.int
%14 = torch.aten.__getitem__.t %2, %13 : !torch.list<int>, !torch.int -> !torch.int
%15 = torch.aten.__getitem__.t %6, %10 : !torch.list<int>, !torch.int -> !torch.int
%16 = torch.aten.sub.int %15, %int1 : !torch.int, !torch.int -> !torch.int
%17 = torch.aten.mul.int %12, %16 : !torch.int, !torch.int -> !torch.int
%18 = torch.aten.add.int %17, %int1 : !torch.int, !torch.int -> !torch.int
%19 = torch.aten.__getitem__.t %5, %10 : !torch.list<int>, !torch.int -> !torch.int
%20 = torch.aten.sub.int %10, %int2 : !torch.int, !torch.int -> !torch.int
%21 = torch.aten.__getitem__.t %2, %20 : !torch.list<int>, !torch.int -> !torch.int
%22 = torch.aten.mul.int %21, %int2 : !torch.int, !torch.int -> !torch.int
%23 = torch.aten.add.int %19, %22 : !torch.int, !torch.int -> !torch.int
%24 = torch.aten.sub.int %23, %18 : !torch.int, !torch.int -> !torch.int
%25 = torch.aten.sub.int %10, %int2 : !torch.int, !torch.int -> !torch.int
%26 = torch.aten.__getitem__.t %3, %25 : !torch.list<int>, !torch.int -> !torch.int
%27 = torch.aten.floordiv.int %24, %26 : !torch.int, !torch.int -> !torch.int
%28 = torch.aten.add.int %27, %int1 : !torch.int, !torch.int -> !torch.int
%29 = torch.aten.append.t %7, %28 : !torch.list<int>, !torch.int -> !torch.list<int>
torch.prim.Loop.condition %true, iter()
} : (!torch.int, !torch.bool) -> ()
torch.shape.calculate.yield.shapes %7 : !torch.list<int>
} : !torch.vtensor<[1,257,4,256],f32>
return %4 : !torch.vtensor<[1,257,4,256],f32>
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
// -----// IR Dump After SimplifyShapeCalculations (torch-simplify-shape-calculations) //----- //
func.func @torch_jit(%arg0: !torch.vtensor<[1,128,4,256],f32>) -> !torch.vtensor<[1,257,4,256],f32> attributes {torch.onnx_meta.ir_version = 7 : si64, torch.onnx_meta.opset_version = 21 : si64, torch.onnx_meta.producer_name = "pytorch", torch.onnx_meta.producer_version = "1.12.1"} {
%int256 = torch.constant.int 256
%int4 = torch.constant.int 4
%int257 = torch.constant.int 257
%false = torch.constant.bool false
%int1 = torch.constant.int 1
%0 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>) : !torch.vtensor<[257,128,1,1],f32>
%1 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>) : !torch.vtensor<[257],f32>
%int0 = torch.constant.int 0
%2 = torch.prim.ListConstruct %int0, %int0 : (!torch.int, !torch.int) -> !torch.list<int>
%3 = torch.prim.ListConstruct %int1, %int1 : (!torch.int, !torch.int) -> !torch.list<int>
%4 = torch.shape.calculate {
%5 = torch.aten.convolution %arg0, %0, %1, %3, %2, %3, %false, %2, %int1 : !torch.vtensor<[1,128,4,256],f32>, !torch.vtensor<[257,128,1,1],f32>, !torch.vtensor<[257],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,257,4,256],f32>
torch.shape.calculate.yield %5 : !torch.vtensor<[1,257,4,256],f32>
} shapes {
%5 = torch.prim.ListConstruct %int1, %int257, %int4, %int256 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
torch.shape.calculate.yield.shapes %5 : !torch.list<int>
} : !torch.vtensor<[1,257,4,256],f32>
return %4 : !torch.vtensor<[1,257,4,256],f32>
}
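// NOTE (reader annotation): SimplifyShapeCalculations folded the shape loop to the constant list
// [1, 257, 4, 256]. Batch size and weight out-channels are copied through, and each spatial size is
// unchanged because (4 + 2*0 - ((1-1) + 1)) // 1 + 1 = 4 and (256 + 2*0 - ((1-1) + 1)) // 1 + 1 = 256
// for the 1x1 kernel with stride 1, padding 0, dilation 1.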
// -----// IR Dump After CSE (cse) //----- //
func.func @torch_jit(%arg0: !torch.vtensor<[1,128,4,256],f32>) -> !torch.vtensor<[1,257,4,256],f32> attributes {torch.onnx_meta.ir_version = 7 : si64, torch.onnx_meta.opset_version = 21 : si64, torch.onnx_meta.producer_name = "pytorch", torch.onnx_meta.producer_version = "1.12.1"} {
%int256 = torch.constant.int 256
%int4 = torch.constant.int 4
%int257 = torch.constant.int 257
%false = torch.constant.bool false
%int1 = torch.constant.int 1
%0 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>) : !torch.vtensor<[257,128,1,1],f32>
%1 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>) : !torch.vtensor<[257],f32>
%int0 = torch.constant.int 0
%2 = torch.prim.ListConstruct %int0, %int0 : (!torch.int, !torch.int) -> !torch.list<int>
%3 = torch.prim.ListConstruct %int1, %int1 : (!torch.int, !torch.int) -> !torch.list<int>
%4 = torch.shape.calculate {
%5 = torch.aten.convolution %arg0, %0, %1, %3, %2, %3, %false, %2, %int1 : !torch.vtensor<[1,128,4,256],f32>, !torch.vtensor<[257,128,1,1],f32>, !torch.vtensor<[257],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,257,4,256],f32>
torch.shape.calculate.yield %5 : !torch.vtensor<[1,257,4,256],f32>
} shapes {
%5 = torch.prim.ListConstruct %int1, %int257, %int4, %int256 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
torch.shape.calculate.yield.shapes %5 : !torch.list<int>
} : !torch.vtensor<[1,257,4,256],f32>
return %4 : !torch.vtensor<[1,257,4,256],f32>
}
// -----// IR Dump After SimplifyShapeCalculations (torch-simplify-shape-calculations) //----- //
func.func @torch_jit(%arg0: !torch.vtensor<[1,128,4,256],f32>) -> !torch.vtensor<[1,257,4,256],f32> attributes {torch.onnx_meta.ir_version = 7 : si64, torch.onnx_meta.opset_version = 21 : si64, torch.onnx_meta.producer_name = "pytorch", torch.onnx_meta.producer_version = "1.12.1"} {
%int256 = torch.constant.int 256
%int4 = torch.constant.int 4
%int257 = torch.constant.int 257
%false = torch.constant.bool false
%int1 = torch.constant.int 1
%0 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>) : !torch.vtensor<[257,128,1,1],f32>
%1 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>) : !torch.vtensor<[257],f32>
%int0 = torch.constant.int 0
%2 = torch.prim.ListConstruct %int0, %int0 : (!torch.int, !torch.int) -> !torch.list<int>
%3 = torch.prim.ListConstruct %int1, %int1 : (!torch.int, !torch.int) -> !torch.list<int>
%4 = torch.shape.calculate {
%5 = torch.aten.convolution %arg0, %0, %1, %3, %2, %3, %false, %2, %int1 : !torch.vtensor<[1,128,4,256],f32>, !torch.vtensor<[257,128,1,1],f32>, !torch.vtensor<[257],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,257,4,256],f32>
torch.shape.calculate.yield %5 : !torch.vtensor<[1,257,4,256],f32>
} shapes {
%5 = torch.prim.ListConstruct %int1, %int257, %int4, %int256 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
torch.shape.calculate.yield.shapes %5 : !torch.list<int>
} : !torch.vtensor<[1,257,4,256],f32>
return %4 : !torch.vtensor<[1,257,4,256],f32>
}
// -----// IR Dump After DropAbstractInterpCalculations (torch-drop-abstract-interp-calculations) //----- //
func.func @torch_jit(%arg0: !torch.vtensor<[1,128,4,256],f32>) -> !torch.vtensor<[1,257,4,256],f32> attributes {torch.onnx_meta.ir_version = 7 : si64, torch.onnx_meta.opset_version = 21 : si64, torch.onnx_meta.producer_name = "pytorch", torch.onnx_meta.producer_version = "1.12.1"} {
%int256 = torch.constant.int 256
%int4 = torch.constant.int 4
%int257 = torch.constant.int 257
%false = torch.constant.bool false
%int1 = torch.constant.int 1
%0 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>) : !torch.vtensor<[257,128,1,1],f32>
%1 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>) : !torch.vtensor<[257],f32>
%int0 = torch.constant.int 0
%2 = torch.prim.ListConstruct %int0, %int0 : (!torch.int, !torch.int) -> !torch.list<int>
%3 = torch.prim.ListConstruct %int1, %int1 : (!torch.int, !torch.int) -> !torch.list<int>
%4 = torch.aten.convolution %arg0, %0, %1, %3, %2, %3, %false, %2, %int1 : !torch.vtensor<[1,128,4,256],f32>, !torch.vtensor<[257,128,1,1],f32>, !torch.vtensor<[257],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,257,4,256],f32>
return %4 : !torch.vtensor<[1,257,4,256],f32>
}
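// NOTE (reader annotation): DropAbstractInterpCalculations removes the torch.shape.calculate
// wrapper, leaving the bare torch.aten.convolution above.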
// -----// IR Dump After RefinePublicReturn (torch-refine-public-return) //----- //
module {
func.func @torch_jit(%arg0: !torch.vtensor<[1,128,4,256],f32>) -> !torch.vtensor<[1,257,4,256],f32> attributes {torch.onnx_meta.ir_version = 7 : si64, torch.onnx_meta.opset_version = 21 : si64, torch.onnx_meta.producer_name = "pytorch", torch.onnx_meta.producer_version = "1.12.1"} {
%int256 = torch.constant.int 256
%int4 = torch.constant.int 4
%int257 = torch.constant.int 257
%false = torch.constant.bool false
%int1 = torch.constant.int 1
%0 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>) : !torch.vtensor<[257,128,1,1],f32>
%1 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>) : !torch.vtensor<[257],f32>
%int0 = torch.constant.int 0
%2 = torch.prim.ListConstruct %int0, %int0 : (!torch.int, !torch.int) -> !torch.list<int>
%3 = torch.prim.ListConstruct %int1, %int1 : (!torch.int, !torch.int) -> !torch.list<int>
%4 = torch.aten.convolution %arg0, %0, %1, %3, %2, %3, %false, %2, %int1 : !torch.vtensor<[1,128,4,256],f32>, !torch.vtensor<[257,128,1,1],f32>, !torch.vtensor<[257],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,257,4,256],f32>
return %4 : !torch.vtensor<[1,257,4,256],f32>
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
func.func @torch_jit(%arg0: !torch.vtensor<[1,128,4,256],f32>) -> !torch.vtensor<[1,257,4,256],f32> attributes {torch.onnx_meta.ir_version = 7 : si64, torch.onnx_meta.opset_version = 21 : si64, torch.onnx_meta.producer_name = "pytorch", torch.onnx_meta.producer_version = "1.12.1"} {
%false = torch.constant.bool false
%int1 = torch.constant.int 1
%0 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>) : !torch.vtensor<[257,128,1,1],f32>
%1 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>) : !torch.vtensor<[257],f32>
%int0 = torch.constant.int 0
%2 = torch.prim.ListConstruct %int0, %int0 : (!torch.int, !torch.int) -> !torch.list<int>
%3 = torch.prim.ListConstruct %int1, %int1 : (!torch.int, !torch.int) -> !torch.list<int>
%4 = torch.aten.convolution %arg0, %0, %1, %3, %2, %3, %false, %2, %int1 : !torch.vtensor<[1,128,4,256],f32>, !torch.vtensor<[257,128,1,1],f32>, !torch.vtensor<[257],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,257,4,256],f32>
return %4 : !torch.vtensor<[1,257,4,256],f32>
}
// -----// IR Dump After DecomposeComplexOps (torch-decompose-complex-ops) //----- //
func.func @torch_jit(%arg0: !torch.vtensor<[1,128,4,256],f32>) -> !torch.vtensor<[1,257,4,256],f32> attributes {torch.onnx_meta.ir_version = 7 : si64, torch.onnx_meta.opset_version = 21 : si64, torch.onnx_meta.producer_name = "pytorch", torch.onnx_meta.producer_version = "1.12.1"} {
%false = torch.constant.bool false
%int1 = torch.constant.int 1
%0 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>) : !torch.vtensor<[257,128,1,1],f32>
%1 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>) : !torch.vtensor<[257],f32>
%int0 = torch.constant.int 0
%2 = torch.prim.ListConstruct %int0, %int0 : (!torch.int, !torch.int) -> !torch.list<int>
%3 = torch.prim.ListConstruct %int1, %int1 : (!torch.int, !torch.int) -> !torch.list<int>
%4 = torch.aten.convolution %arg0, %0, %1, %3, %2, %3, %false, %2, %int1 : !torch.vtensor<[1,128,4,256],f32>, !torch.vtensor<[257,128,1,1],f32>, !torch.vtensor<[257],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,257,4,256],f32>
return %4 : !torch.vtensor<[1,257,4,256],f32>
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
func.func @torch_jit(%arg0: !torch.vtensor<[1,128,4,256],f32>) -> !torch.vtensor<[1,257,4,256],f32> attributes {torch.onnx_meta.ir_version = 7 : si64, torch.onnx_meta.opset_version = 21 : si64, torch.onnx_meta.producer_name = "pytorch", torch.onnx_meta.producer_version = "1.12.1"} {
%false = torch.constant.bool false
%int1 = torch.constant.int 1
%0 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>) : !torch.vtensor<[257,128,1,1],f32>
%1 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>) : !torch.vtensor<[257],f32>
%int0 = torch.constant.int 0
%2 = torch.prim.ListConstruct %int0, %int0 : (!torch.int, !torch.int) -> !torch.list<int>
%3 = torch.prim.ListConstruct %int1, %int1 : (!torch.int, !torch.int) -> !torch.list<int>
%4 = torch.aten.convolution %arg0, %0, %1, %3, %2, %3, %false, %2, %int1 : !torch.vtensor<[1,128,4,256],f32>, !torch.vtensor<[257,128,1,1],f32>, !torch.vtensor<[257],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,257,4,256],f32>
return %4 : !torch.vtensor<[1,257,4,256],f32>
}
// -----// IR Dump After BindSymbolicShapesPass (torch-iree-bind-symbolic-shapes) //----- //
func.func @torch_jit(%arg0: !torch.vtensor<[1,128,4,256],f32>) -> !torch.vtensor<[1,257,4,256],f32> attributes {torch.onnx_meta.ir_version = 7 : si64, torch.onnx_meta.opset_version = 21 : si64, torch.onnx_meta.producer_name = "pytorch", torch.onnx_meta.producer_version = "1.12.1"} {
%false = torch.constant.bool false
%int1 = torch.constant.int 1
%0 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>) : !torch.vtensor<[257,128,1,1],f32>
%1 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>) : !torch.vtensor<[257],f32>
%int0 = torch.constant.int 0
%2 = torch.prim.ListConstruct %int0, %int0 : (!torch.int, !torch.int) -> !torch.list<int>
%3 = torch.prim.ListConstruct %int1, %int1 : (!torch.int, !torch.int) -> !torch.list<int>
%4 = torch.aten.convolution %arg0, %0, %1, %3, %2, %3, %false, %2, %int1 : !torch.vtensor<[1,128,4,256],f32>, !torch.vtensor<[257,128,1,1],f32>, !torch.vtensor<[257],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,257,4,256],f32>
return %4 : !torch.vtensor<[1,257,4,256],f32>
}
// -----// IR Dump After SetStrictSymbolicShapesPass (torch-iree-set-strict-symbolic-shapes) //----- //
func.func @torch_jit(%arg0: !torch.vtensor<[1,128,4,256],f32>) -> !torch.vtensor<[1,257,4,256],f32> attributes {torch.assume_strict_symbolic_shapes, torch.onnx_meta.ir_version = 7 : si64, torch.onnx_meta.opset_version = 21 : si64, torch.onnx_meta.producer_name = "pytorch", torch.onnx_meta.producer_version = "1.12.1"} {
%false = torch.constant.bool false
%int1 = torch.constant.int 1
%0 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>) : !torch.vtensor<[257,128,1,1],f32>
%1 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>) : !torch.vtensor<[257],f32>
%int0 = torch.constant.int 0
%2 = torch.prim.ListConstruct %int0, %int0 : (!torch.int, !torch.int) -> !torch.list<int>
%3 = torch.prim.ListConstruct %int1, %int1 : (!torch.int, !torch.int) -> !torch.list<int>
%4 = torch.aten.convolution %arg0, %0, %1, %3, %2, %3, %false, %2, %int1 : !torch.vtensor<[1,128,4,256],f32>, !torch.vtensor<[257,128,1,1],f32>, !torch.vtensor<[257],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,257,4,256],f32>
return %4 : !torch.vtensor<[1,257,4,256],f32>
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
func.func @torch_jit(%arg0: !torch.vtensor<[1,128,4,256],f32>) -> !torch.vtensor<[1,257,4,256],f32> attributes {torch.assume_strict_symbolic_shapes, torch.onnx_meta.ir_version = 7 : si64, torch.onnx_meta.opset_version = 21 : si64, torch.onnx_meta.producer_name = "pytorch", torch.onnx_meta.producer_version = "1.12.1"} {
%false = torch.constant.bool false
%int1 = torch.constant.int 1
%0 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>) : !torch.vtensor<[257,128,1,1],f32>
%1 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>) : !torch.vtensor<[257],f32>
%int0 = torch.constant.int 0
%2 = torch.prim.ListConstruct %int0, %int0 : (!torch.int, !torch.int) -> !torch.list<int>
%3 = torch.prim.ListConstruct %int1, %int1 : (!torch.int, !torch.int) -> !torch.list<int>
%4 = torch.aten.convolution %arg0, %0, %1, %3, %2, %3, %false, %2, %int1 : !torch.vtensor<[1,128,4,256],f32>, !torch.vtensor<[257,128,1,1],f32>, !torch.vtensor<[257],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,257,4,256],f32>
return %4 : !torch.vtensor<[1,257,4,256],f32>
}
// -----// IR Dump After BitCastQuantTensorPass (torch-iree-bitcast-quant-tensor) //----- //
func.func @torch_jit(%arg0: !torch.vtensor<[1,128,4,256],f32>) -> !torch.vtensor<[1,257,4,256],f32> attributes {torch.assume_strict_symbolic_shapes, torch.onnx_meta.ir_version = 7 : si64, torch.onnx_meta.opset_version = 21 : si64, torch.onnx_meta.producer_name = "pytorch", torch.onnx_meta.producer_version = "1.12.1"} {
%false = torch.constant.bool false
%int1 = torch.constant.int 1
%0 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>) : !torch.vtensor<[257,128,1,1],f32>
%1 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>) : !torch.vtensor<[257],f32>
%int0 = torch.constant.int 0
%2 = torch.prim.ListConstruct %int0, %int0 : (!torch.int, !torch.int) -> !torch.list<int>
%3 = torch.prim.ListConstruct %int1, %int1 : (!torch.int, !torch.int) -> !torch.list<int>
%4 = torch.aten.convolution %arg0, %0, %1, %3, %2, %3, %false, %2, %int1 : !torch.vtensor<[1,128,4,256],f32>, !torch.vtensor<[257,128,1,1],f32>, !torch.vtensor<[257],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,257,4,256],f32>
return %4 : !torch.vtensor<[1,257,4,256],f32>
}
// -----// IR Dump After ReduceOpVariants (torch-reduce-op-variants) //----- //
func.func @torch_jit(%arg0: !torch.vtensor<[1,128,4,256],f32>) -> !torch.vtensor<[1,257,4,256],f32> attributes {torch.assume_strict_symbolic_shapes, torch.onnx_meta.ir_version = 7 : si64, torch.onnx_meta.opset_version = 21 : si64, torch.onnx_meta.producer_name = "pytorch", torch.onnx_meta.producer_version = "1.12.1"} {
%false = torch.constant.bool false
%int1 = torch.constant.int 1
%0 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>) : !torch.vtensor<[257,128,1,1],f32>
%1 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>) : !torch.vtensor<[257],f32>
%int0 = torch.constant.int 0
%2 = torch.prim.ListConstruct %int0, %int0 : (!torch.int, !torch.int) -> !torch.list<int>
%3 = torch.prim.ListConstruct %int1, %int1 : (!torch.int, !torch.int) -> !torch.list<int>
%4 = torch.aten.convolution %arg0, %0, %1, %3, %2, %3, %false, %2, %int1 : !torch.vtensor<[1,128,4,256],f32>, !torch.vtensor<[257,128,1,1],f32>, !torch.vtensor<[257],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,257,4,256],f32>
return %4 : !torch.vtensor<[1,257,4,256],f32>
}
// -----// IR Dump After ConvertCustomQuantOp (torch-convert-custom-quant-op) //----- //
func.func @torch_jit(%arg0: !torch.vtensor<[1,128,4,256],f32>) -> !torch.vtensor<[1,257,4,256],f32> attributes {torch.assume_strict_symbolic_shapes, torch.onnx_meta.ir_version = 7 : si64, torch.onnx_meta.opset_version = 21 : si64, torch.onnx_meta.producer_name = "pytorch", torch.onnx_meta.producer_version = "1.12.1"} {
%false = torch.constant.bool false
%int1 = torch.constant.int 1
%0 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>) : !torch.vtensor<[257,128,1,1],f32>
%1 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>) : !torch.vtensor<[257],f32>
%int0 = torch.constant.int 0
%2 = torch.prim.ListConstruct %int0, %int0 : (!torch.int, !torch.int) -> !torch.list<int>
%3 = torch.prim.ListConstruct %int1, %int1 : (!torch.int, !torch.int) -> !torch.list<int>
%4 = torch.aten.convolution %arg0, %0, %1, %3, %2, %3, %false, %2, %int1 : !torch.vtensor<[1,128,4,256],f32>, !torch.vtensor<[257,128,1,1],f32>, !torch.vtensor<[257],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,257,4,256],f32>
return %4 : !torch.vtensor<[1,257,4,256],f32>
}
// -----// IR Dump After DecomposeComplexOps (torch-decompose-complex-ops) //----- //
func.func @torch_jit(%arg0: !torch.vtensor<[1,128,4,256],f32>) -> !torch.vtensor<[1,257,4,256],f32> attributes {torch.assume_strict_symbolic_shapes, torch.onnx_meta.ir_version = 7 : si64, torch.onnx_meta.opset_version = 21 : si64, torch.onnx_meta.producer_name = "pytorch", torch.onnx_meta.producer_version = "1.12.1"} {
%false = torch.constant.bool false
%int1 = torch.constant.int 1
%0 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>) : !torch.vtensor<[257,128,1,1],f32>
%1 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>) : !torch.vtensor<[257],f32>
%int0 = torch.constant.int 0
%2 = torch.prim.ListConstruct %int0, %int0 : (!torch.int, !torch.int) -> !torch.list<int>
%3 = torch.prim.ListConstruct %int1, %int1 : (!torch.int, !torch.int) -> !torch.list<int>
%4 = torch.aten.convolution %arg0, %0, %1, %3, %2, %3, %false, %2, %int1 : !torch.vtensor<[1,128,4,256],f32>, !torch.vtensor<[257,128,1,1],f32>, !torch.vtensor<[257],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,257,4,256],f32>
return %4 : !torch.vtensor<[1,257,4,256],f32>
}
// -----// IR Dump After FuseQuantizedOps (torch-fuse-quantized-ops) //----- //
func.func @torch_jit(%arg0: !torch.vtensor<[1,128,4,256],f32>) -> !torch.vtensor<[1,257,4,256],f32> attributes {torch.assume_strict_symbolic_shapes, torch.onnx_meta.ir_version = 7 : si64, torch.onnx_meta.opset_version = 21 : si64, torch.onnx_meta.producer_name = "pytorch", torch.onnx_meta.producer_version = "1.12.1"} {
%false = torch.constant.bool false
%int1 = torch.constant.int 1
%0 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>) : !torch.vtensor<[257,128,1,1],f32>
%1 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>) : !torch.vtensor<[257],f32>
%int0 = torch.constant.int 0
%2 = torch.prim.ListConstruct %int0, %int0 : (!torch.int, !torch.int) -> !torch.list<int>
%3 = torch.prim.ListConstruct %int1, %int1 : (!torch.int, !torch.int) -> !torch.list<int>
%4 = torch.aten.convolution %arg0, %0, %1, %3, %2, %3, %false, %2, %int1 : !torch.vtensor<[1,128,4,256],f32>, !torch.vtensor<[257,128,1,1],f32>, !torch.vtensor<[257],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,257,4,256],f32>
return %4 : !torch.vtensor<[1,257,4,256],f32>
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
func.func @torch_jit(%arg0: !torch.vtensor<[1,128,4,256],f32>) -> !torch.vtensor<[1,257,4,256],f32> attributes {torch.assume_strict_symbolic_shapes, torch.onnx_meta.ir_version = 7 : si64, torch.onnx_meta.opset_version = 21 : si64, torch.onnx_meta.producer_name = "pytorch", torch.onnx_meta.producer_version = "1.12.1"} {
%false = torch.constant.bool false
%int1 = torch.constant.int 1
%0 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>) : !torch.vtensor<[257,128,1,1],f32>
%1 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>) : !torch.vtensor<[257],f32>
%int0 = torch.constant.int 0
%2 = torch.prim.ListConstruct %int0, %int0 : (!torch.int, !torch.int) -> !torch.list<int>
%3 = torch.prim.ListConstruct %int1, %int1 : (!torch.int, !torch.int) -> !torch.list<int>
%4 = torch.aten.convolution %arg0, %0, %1, %3, %2, %3, %false, %2, %int1 : !torch.vtensor<[1,128,4,256],f32>, !torch.vtensor<[257,128,1,1],f32>, !torch.vtensor<[257],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,257,4,256],f32>
return %4 : !torch.vtensor<[1,257,4,256],f32>
}
// -----// IR Dump After ScalarizeShapes (torch-scalarize-shapes) //----- //
func.func @torch_jit(%arg0: !torch.vtensor<[1,128,4,256],f32>) -> !torch.vtensor<[1,257,4,256],f32> attributes {torch.assume_strict_symbolic_shapes, torch.onnx_meta.ir_version = 7 : si64, torch.onnx_meta.opset_version = 21 : si64, torch.onnx_meta.producer_name = "pytorch", torch.onnx_meta.producer_version = "1.12.1"} {
%false = torch.constant.bool false
%int1 = torch.constant.int 1
%0 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>) : !torch.vtensor<[257,128,1,1],f32>
%1 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>) : !torch.vtensor<[257],f32>
%int0 = torch.constant.int 0
%2 = torch.prim.ListConstruct %int0, %int0 : (!torch.int, !torch.int) -> !torch.list<int>
%3 = torch.prim.ListConstruct %int1, %int1 : (!torch.int, !torch.int) -> !torch.list<int>
%4 = torch.aten.convolution %arg0, %0, %1, %3, %2, %3, %false, %2, %int1 : !torch.vtensor<[1,128,4,256],f32>, !torch.vtensor<[257,128,1,1],f32>, !torch.vtensor<[257],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,257,4,256],f32>
return %4 : !torch.vtensor<[1,257,4,256],f32>
}
// -----// IR Dump After ConvertTorchToTMTensor (convert-torch-to-tmtensor) //----- //
func.func @torch_jit(%arg0: !torch.vtensor<[1,128,4,256],f32>) -> !torch.vtensor<[1,257,4,256],f32> attributes {torch.assume_strict_symbolic_shapes, torch.onnx_meta.ir_version = 7 : si64, torch.onnx_meta.opset_version = 21 : si64, torch.onnx_meta.producer_name = "pytorch", torch.onnx_meta.producer_version = "1.12.1"} {
%false = torch.constant.bool false
%int1 = torch.constant.int 1
%0 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>) : !torch.vtensor<[257,128,1,1],f32>
%1 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>) : !torch.vtensor<[257],f32>
%int0 = torch.constant.int 0
%2 = torch.prim.ListConstruct %int0, %int0 : (!torch.int, !torch.int) -> !torch.list<int>
%3 = torch.prim.ListConstruct %int1, %int1 : (!torch.int, !torch.int) -> !torch.list<int>
%4 = torch.aten.convolution %arg0, %0, %1, %3, %2, %3, %false, %2, %int1 : !torch.vtensor<[1,128,4,256],f32>, !torch.vtensor<[257,128,1,1],f32>, !torch.vtensor<[257],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,257,4,256],f32>
return %4 : !torch.vtensor<[1,257,4,256],f32>
}
// -----// IR Dump After ConvertTMTensorToLinalgExtPass (torch-iree-tm-tensor-to-linalg-ext) //----- //
func.func @torch_jit(%arg0: !torch.vtensor<[1,128,4,256],f32>) -> !torch.vtensor<[1,257,4,256],f32> attributes {torch.assume_strict_symbolic_shapes, torch.onnx_meta.ir_version = 7 : si64, torch.onnx_meta.opset_version = 21 : si64, torch.onnx_meta.producer_name = "pytorch", torch.onnx_meta.producer_version = "1.12.1"} {
%false = torch.constant.bool false
%int1 = torch.constant.int 1
%0 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>) : !torch.vtensor<[257,128,1,1],f32>
%1 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>) : !torch.vtensor<[257],f32>
%int0 = torch.constant.int 0
%2 = torch.prim.ListConstruct %int0, %int0 : (!torch.int, !torch.int) -> !torch.list<int>
%3 = torch.prim.ListConstruct %int1, %int1 : (!torch.int, !torch.int) -> !torch.list<int>
%4 = torch.aten.convolution %arg0, %0, %1, %3, %2, %3, %false, %2, %int1 : !torch.vtensor<[1,128,4,256],f32>, !torch.vtensor<[257,128,1,1],f32>, !torch.vtensor<[257],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,257,4,256],f32>
return %4 : !torch.vtensor<[1,257,4,256],f32>
}
// -----// IR Dump After ConvertTorchToTensor (convert-torch-to-tensor) //----- //
func.func @torch_jit(%arg0: !torch.vtensor<[1,128,4,256],f32>) -> !torch.vtensor<[1,257,4,256],f32> attributes {torch.assume_strict_symbolic_shapes, torch.onnx_meta.ir_version = 7 : si64, torch.onnx_meta.opset_version = 21 : si64, torch.onnx_meta.producer_name = "pytorch", torch.onnx_meta.producer_version = "1.12.1"} {
%false = torch.constant.bool false
%int1 = torch.constant.int 1
%0 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>) : !torch.vtensor<[257,128,1,1],f32>
%1 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>) : !torch.vtensor<[257],f32>
%int0 = torch.constant.int 0
%2 = torch.prim.ListConstruct %int0, %int0 : (!torch.int, !torch.int) -> !torch.list<int>
%3 = torch.prim.ListConstruct %int1, %int1 : (!torch.int, !torch.int) -> !torch.list<int>
%4 = torch.aten.convolution %arg0, %0, %1, %3, %2, %3, %false, %2, %int1 : !torch.vtensor<[1,128,4,256],f32>, !torch.vtensor<[257,128,1,1],f32>, !torch.vtensor<[257],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,257,4,256],f32>
return %4 : !torch.vtensor<[1,257,4,256],f32>
}
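// NOTE (reader annotation): the dumps from the post-RefinePublicReturn Canonicalizer through
// ConvertTorchToTensor above show the function unchanged, except that SetStrictSymbolicShapesPass
// adds the torch.assume_strict_symbolic_shapes attribute.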
// -----// IR Dump After ConvertTorchToLinalg (convert-torch-to-linalg) //----- //
func.func @torch_jit(%arg0: !torch.vtensor<[1,128,4,256],f32>) -> !torch.vtensor<[1,257,4,256],f32> attributes {torch.assume_strict_symbolic_shapes, torch.onnx_meta.ir_version = 7 : si64, torch.onnx_meta.opset_version = 21 : si64, torch.onnx_meta.producer_name = "pytorch", torch.onnx_meta.producer_version = "1.12.1"} {
%0 = torch_c.to_builtin_tensor %arg0 : !torch.vtensor<[1,128,4,256],f32> -> tensor<1x128x4x256xf32>
%false = torch.constant.bool false
%int1 = torch.constant.int 1
%c1_i64 = arith.constant 1 : i64
%1 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>) : !torch.vtensor<[257,128,1,1],f32>
%2 = torch_c.to_builtin_tensor %1 : !torch.vtensor<[257,128,1,1],f32> -> tensor<257x128x1x1xf32>
%3 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>) : !torch.vtensor<[257],f32>
%4 = torch_c.to_builtin_tensor %3 : !torch.vtensor<[257],f32> -> tensor<257xf32>
%int0 = torch.constant.int 0
%5 = torch.prim.ListConstruct %int0, %int0 : (!torch.int, !torch.int) -> !torch.list<int>
%6 = torch.prim.ListConstruct %int1, %int1 : (!torch.int, !torch.int) -> !torch.list<int>
%c0_i64 = arith.constant 0 : i64
%c0_i64_0 = arith.constant 0 : i64
%c0_i64_1 = arith.constant 0 : i64
%c0_i64_2 = arith.constant 0 : i64
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%c1_3 = arith.constant 1 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c4 = arith.constant 4 : index
%c3 = arith.constant 3 : index
%c256 = arith.constant 256 : index
%c0_4 = arith.constant 0 : index
%c257 = arith.constant 257 : index
%c1_5 = arith.constant 1 : index
%c128_6 = arith.constant 128 : index
%c2_7 = arith.constant 2 : index
%c1_8 = arith.constant 1 : index
%c3_9 = arith.constant 3 : index
%c1_10 = arith.constant 1 : index
%7 = arith.index_cast %c1_i64 : i64 to index
%c0_11 = arith.constant 0 : index
%8 = arith.remsi %c128, %7 : index
%9 = arith.cmpi eq, %c0_11, %8 : index
cf.assert %9, "invalid: groups must divide input channel size evenly."
%c0_12 = arith.constant 0 : index
%10 = arith.remsi %c257, %7 : index
%11 = arith.cmpi eq, %c0_12, %10 : index
cf.assert %11, "invalid: groups must divide weight batch size evenly."
%c1_i64_13 = arith.constant 1 : i64
%c1_i64_14 = arith.constant 1 : i64
%c1_i64_15 = arith.constant 1 : i64
%c1_i64_16 = arith.constant 1 : i64
%cst = arith.constant 0.000000e+00 : f32
%c0_17 = arith.constant 0 : index
%c1_18 = arith.constant 1 : index
%c1_19 = arith.constant 1 : index
%c128_20 = arith.constant 128 : index
%c2_21 = arith.constant 2 : index
%c4_22 = arith.constant 4 : index
%c3_23 = arith.constant 3 : index
%c256_24 = arith.constant 256 : index
%c0_i64_25 = arith.constant 0 : i64
%c0_26 = arith.constant 0 : index
%c0_27 = arith.constant 0 : index
%c0_28 = arith.constant 0 : index
%c0_29 = arith.constant 0 : index
%padded = tensor.pad %0 low[0, 0, 0, 0] high[0, 0, 0, 0] {
^bb0(%arg1: index, %arg2: index, %arg3: index, %arg4: index):
tensor.yield %cst : f32
} : tensor<1x128x4x256xf32> to tensor<1x128x4x256xf32>
%c1_i64_30 = arith.constant 1 : i64
%c1_i64_31 = arith.constant 1 : i64
%c2_i64 = arith.constant 2 : i64
%c0_i64_32 = arith.constant 0 : i64
%c4_i64 = arith.constant 4 : i64
%c0_i64_33 = arith.constant 0 : i64
%c3_i64 = arith.constant 3 : i64
%c4_i64_34 = arith.constant 4 : i64
%c4_35 = arith.constant 4 : index
%c1_i64_36 = arith.constant 1 : i64
%c1_i64_37 = arith.constant 1 : i64
%c2_i64_38 = arith.constant 2 : i64
%c0_i64_39 = arith.constant 0 : i64
%c256_i64 = arith.constant 256 : i64
%c0_i64_40 = arith.constant 0 : i64
%c255_i64 = arith.constant 255 : i64
%c256_i64_41 = arith.constant 256 : i64
%c256_42 = arith.constant 256 : index
%12 = tensor.empty() : tensor<1x257x4x256xf32>
%broadcasted = linalg.broadcast ins(%4 : tensor<257xf32>) outs(%12 : tensor<1x257x4x256xf32>) dimensions = [0, 2, 3]
%13 = arith.floordivsi %c128, %7 : index
%14 = arith.floordivsi %c257, %7 : index
%c0_43 = arith.constant 0 : index
%c1_44 = arith.constant 1 : index
%15 = linalg.conv_2d_nchw_fchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%padded, %2 : tensor<1x128x4x256xf32>, tensor<257x128x1x1xf32>) outs(%broadcasted : tensor<1x257x4x256xf32>) -> tensor<1x257x4x256xf32>
%cast = tensor.cast %15 : tensor<1x257x4x256xf32> to tensor<1x257x4x256xf32>
%16 = torch_c.from_builtin_tensor %cast : tensor<1x257x4x256xf32> -> !torch.vtensor<[1,257,4,256],f32>
return %16 : !torch.vtensor<[1,257,4,256],f32>
}
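// NOTE (reader annotation): ConvertTorchToLinalg lowers the convolution to builtin dialects: the
// input goes through a zero-extent tensor.pad (padding is [0, 0]), the bias %4 is linalg.broadcast
// into the 1x257x4x256 init tensor, and linalg.conv_2d_nchw_fchw accumulates the convolution on top
// of that bias. The two cf.assert ops check that groups (= 1 here) evenly divides the input channel
// count (128) and the weight batch size (257).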
// -----// IR Dump After CSE (cse) //----- //
func.func @torch_jit(%arg0: !torch.vtensor<[1,128,4,256],f32>) -> !torch.vtensor<[1,257,4,256],f32> attributes {torch.assume_strict_symbolic_shapes, torch.onnx_meta.ir_version = 7 : si64, torch.onnx_meta.opset_version = 21 : si64, torch.onnx_meta.producer_name = "pytorch", torch.onnx_meta.producer_version = "1.12.1"} {
%0 = torch_c.to_builtin_tensor %arg0 : !torch.vtensor<[1,128,4,256],f32> -> tensor<1x128x4x256xf32>
%int1 = torch.constant.int 1
%c1_i64 = arith.constant 1 : i64
%1 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>) : !torch.vtensor<[257,128,1,1],f32>
%2 = torch_c.to_builtin_tensor %1 : !torch.vtensor<[257,128,1,1],f32> -> tensor<257x128x1x1xf32>
%3 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>) : !torch.vtensor<[257],f32>
%4 = torch_c.to_builtin_tensor %3 : !torch.vtensor<[257],f32> -> tensor<257xf32>
%int0 = torch.constant.int 0
%c128 = arith.constant 128 : index
%c257 = arith.constant 257 : index
%5 = arith.index_cast %c1_i64 : i64 to index
%c0 = arith.constant 0 : index
%6 = arith.remsi %c128, %5 : index
%7 = arith.cmpi eq, %c0, %6 : index
cf.assert %7, "invalid: groups must divide input channel size evenly."
%8 = arith.remsi %c257, %5 : index
%9 = arith.cmpi eq, %c0, %8 : index
cf.assert %9, "invalid: groups must divide weight batch size evenly."
%cst = arith.constant 0.000000e+00 : f32
%padded = tensor.pad %0 low[0, 0, 0, 0] high[0, 0, 0, 0] {
^bb0(%arg1: index, %arg2: index, %arg3: index, %arg4: index):
tensor.yield %cst : f32
} : tensor<1x128x4x256xf32> to tensor<1x128x4x256xf32>
%10 = tensor.empty() : tensor<1x257x4x256xf32>
%broadcasted = linalg.broadcast ins(%4 : tensor<257xf32>) outs(%10 : tensor<1x257x4x256xf32>) dimensions = [0, 2, 3]
%11 = linalg.conv_2d_nchw_fchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%padded, %2 : tensor<1x128x4x256xf32>, tensor<257x128x1x1xf32>) outs(%broadcasted : tensor<1x257x4x256xf32>) -> tensor<1x257x4x256xf32>
%cast = tensor.cast %11 : tensor<1x257x4x256xf32> to tensor<1x257x4x256xf32>
%12 = torch_c.from_builtin_tensor %cast : tensor<1x257x4x256xf32> -> !torch.vtensor<[1,257,4,256],f32>
return %12 : !torch.vtensor<[1,257,4,256],f32>
}
// -----// IR Dump After ConvertTorchToSCF (convert-torch-to-scf) //----- //
func.func @torch_jit(%arg0: !torch.vtensor<[1,128,4,256],f32>) -> !torch.vtensor<[1,257,4,256],f32> attributes {torch.assume_strict_symbolic_shapes, torch.onnx_meta.ir_version = 7 : si64, torch.onnx_meta.opset_version = 21 : si64, torch.onnx_meta.producer_name = "pytorch", torch.onnx_meta.producer_version = "1.12.1"} {
%0 = torch_c.to_builtin_tensor %arg0 : !torch.vtensor<[1,128,4,256],f32> -> tensor<1x128x4x256xf32>
%int1 = torch.constant.int 1
%c1_i64 = arith.constant 1 : i64
%1 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>) : !torch.vtensor<[257,128,1,1],f32>
%2 = torch_c.to_builtin_tensor %1 : !torch.vtensor<[257,128,1,1],f32> -> tensor<257x128x1x1xf32>
%3 = torch.vtensor.literal(dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>) : !torch.vtensor<[257],f32>
%4 = torch_c.to_builtin_tensor %3 : !torch.vtensor<[257],f32> -> tensor<257xf32>
%int0 = torch.constant.int 0
%c128 = arith.constant 128 : index
%c257 = arith.constant 257 : index
%5 = arith.index_cast %c1_i64 : i64 to index
%c0 = arith.constant 0 : index
%6 = arith.remsi %c128, %5 : index
%7 = arith.cmpi eq, %c0, %6 : index
cf.assert %7, "invalid: groups must divide input channel size evenly."
%8 = arith.remsi %c257, %5 : index
%9 = arith.cmpi eq, %c0, %8 : index
cf.assert %9, "invalid: groups must divide weight batch size evenly."
%cst = arith.constant 0.000000e+00 : f32
%10 = tensor.empty() : tensor<1x257x4x256xf32>
%broadcasted = linalg.broadcast ins(%4 : tensor<257xf32>) outs(%10 : tensor<1x257x4x256xf32>) dimensions = [0, 2, 3]
%11 = linalg.conv_2d_nchw_fchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%0, %2 : tensor<1x128x4x256xf32>, tensor<257x128x1x1xf32>) outs(%broadcasted : tensor<1x257x4x256xf32>) -> tensor<1x257x4x256xf32>
%12 = torch_c.from_builtin_tensor %11 : tensor<1x257x4x256xf32> -> !torch.vtensor<[1,257,4,256],f32>
return %12 : !torch.vtensor<[1,257,4,256],f32>
}
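// NOTE (reader annotation): compared with the CSE dump above, the zero-extent tensor.pad is no
// longer present in this dump; the convolution reads the input %0 directly.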
// -----// IR Dump After ConvertTorchToArith (convert-torch-to-arith) //----- //
func.func @torch_jit(%arg0: !torch.vtensor<[1,128,4,256],f32>) -> !torch.vtensor<[1,257,4,256],f32> attributes {torch.assume_strict_symbolic_shapes, torch.onnx_meta.ir_version = 7 : si64, torch.onnx_meta.opset_version = 21 : si64, torch.onnx_meta.producer_name = "pytorch", torch.onnx_meta.producer_version = "1.12.1"} {
%0 = torch_c.to_builtin_tensor %arg0 : !torch.vtensor<[1,128,4,256],f32> -> tensor<1x128x4x256xf32>
%c1_i64 = arith.constant 1 : i64
%c1_i64_0 = arith.constant 1 : i64
%cst = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%1 = torch_c.from_builtin_tensor %cst : tensor<257x128x1x1xf32> -> !torch.vtensor<[257,128,1,1],f32>
%2 = torch_c.to_builtin_tensor %1 : !torch.vtensor<[257,128,1,1],f32> -> tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%3 = torch_c.from_builtin_tensor %cst_1 : tensor<257xf32> -> !torch.vtensor<[257],f32>
%4 = torch_c.to_builtin_tensor %3 : !torch.vtensor<[257],f32> -> tensor<257xf32>
%c0_i64 = arith.constant 0 : i64
%c128 = arith.constant 128 : index
%c257 = arith.constant 257 : index
%5 = arith.index_cast %c1_i64_0 : i64 to index
%c0 = arith.constant 0 : index
%6 = arith.remsi %c128, %5 : index
%7 = arith.cmpi eq, %c0, %6 : index
cf.assert %7, "invalid: groups must divide input channel size evenly."
%8 = arith.remsi %c257, %5 : index
%9 = arith.cmpi eq, %c0, %8 : index
cf.assert %9, "invalid: groups must divide weight batch size evenly."
%cst_2 = arith.constant 0.000000e+00 : f32
%10 = tensor.empty() : tensor<1x257x4x256xf32>
%broadcasted = linalg.broadcast ins(%4 : tensor<257xf32>) outs(%10 : tensor<1x257x4x256xf32>) dimensions = [0, 2, 3]
%11 = linalg.conv_2d_nchw_fchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%0, %2 : tensor<1x128x4x256xf32>, tensor<257x128x1x1xf32>) outs(%broadcasted : tensor<1x257x4x256xf32>) -> tensor<1x257x4x256xf32>
%12 = torch_c.from_builtin_tensor %11 : tensor<1x257x4x256xf32> -> !torch.vtensor<[1,257,4,256],f32>
return %12 : !torch.vtensor<[1,257,4,256],f32>
}
// -----// IR Dump After ConvertTorchConversionToMLProgram (convert-torch-conversion-to-mlprogram) //----- //
module {
ml_program.global private mutable @global_seed(dense<0> : tensor<i64>) : tensor<i64>
func.func @torch_jit(%arg0: !torch.vtensor<[1,128,4,256],f32>) -> !torch.vtensor<[1,257,4,256],f32> attributes {torch.assume_strict_symbolic_shapes, torch.onnx_meta.ir_version = 7 : si64, torch.onnx_meta.opset_version = 21 : si64, torch.onnx_meta.producer_name = "pytorch", torch.onnx_meta.producer_version = "1.12.1"} {
%0 = torch_c.to_builtin_tensor %arg0 : !torch.vtensor<[1,128,4,256],f32> -> tensor<1x128x4x256xf32>
%c1_i64 = arith.constant 1 : i64
%c1_i64_0 = arith.constant 1 : i64
%cst = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%1 = torch_c.from_builtin_tensor %cst : tensor<257x128x1x1xf32> -> !torch.vtensor<[257,128,1,1],f32>
%2 = torch_c.to_builtin_tensor %1 : !torch.vtensor<[257,128,1,1],f32> -> tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%3 = torch_c.from_builtin_tensor %cst_1 : tensor<257xf32> -> !torch.vtensor<[257],f32>
%4 = torch_c.to_builtin_tensor %3 : !torch.vtensor<[257],f32> -> tensor<257xf32>
%c0_i64 = arith.constant 0 : i64
%c128 = arith.constant 128 : index
%c257 = arith.constant 257 : index
%5 = arith.index_cast %c1_i64_0 : i64 to index
%c0 = arith.constant 0 : index
%6 = arith.remsi %c128, %5 : index
%7 = arith.cmpi eq, %c0, %6 : index
cf.assert %7, "invalid: groups must divide input channel size evenly."
%8 = arith.remsi %c257, %5 : index
%9 = arith.cmpi eq, %c0, %8 : index
cf.assert %9, "invalid: groups must divide weight batch size evenly."
%cst_2 = arith.constant 0.000000e+00 : f32
%10 = tensor.empty() : tensor<1x257x4x256xf32>
%broadcasted = linalg.broadcast ins(%4 : tensor<257xf32>) outs(%10 : tensor<1x257x4x256xf32>) dimensions = [0, 2, 3]
%11 = linalg.conv_2d_nchw_fchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%0, %2 : tensor<1x128x4x256xf32>, tensor<257x128x1x1xf32>) outs(%broadcasted : tensor<1x257x4x256xf32>) -> tensor<1x257x4x256xf32>
%12 = torch_c.from_builtin_tensor %11 : tensor<1x257x4x256xf32> -> !torch.vtensor<[1,257,4,256],f32>
return %12 : !torch.vtensor<[1,257,4,256],f32>
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
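// NOTE: ConvertTorchConversionToMLProgram wraps the function in a module and
// introduces ml_program.global @global_seed (presumably the RNG seed state).
// The {-# dialect_resources #-} trailer carries the weight/bias blobs; they
// print as "0x00000000" because the original ONNX constants were elided when
// this dump was produced, as the resource key names indicate.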
// -----// IR Dump After ExpandOps (memref-expand) //----- //
func.func @torch_jit(%arg0: !torch.vtensor<[1,128,4,256],f32>) -> !torch.vtensor<[1,257,4,256],f32> attributes {torch.assume_strict_symbolic_shapes, torch.onnx_meta.ir_version = 7 : si64, torch.onnx_meta.opset_version = 21 : si64, torch.onnx_meta.producer_name = "pytorch", torch.onnx_meta.producer_version = "1.12.1"} {
%0 = torch_c.to_builtin_tensor %arg0 : !torch.vtensor<[1,128,4,256],f32> -> tensor<1x128x4x256xf32>
%c1_i64 = arith.constant 1 : i64
%c1_i64_0 = arith.constant 1 : i64
%cst = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%1 = torch_c.from_builtin_tensor %cst : tensor<257x128x1x1xf32> -> !torch.vtensor<[257,128,1,1],f32>
%2 = torch_c.to_builtin_tensor %1 : !torch.vtensor<[257,128,1,1],f32> -> tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%3 = torch_c.from_builtin_tensor %cst_1 : tensor<257xf32> -> !torch.vtensor<[257],f32>
%4 = torch_c.to_builtin_tensor %3 : !torch.vtensor<[257],f32> -> tensor<257xf32>
%c0_i64 = arith.constant 0 : i64
%c128 = arith.constant 128 : index
%c257 = arith.constant 257 : index
%5 = arith.index_cast %c1_i64_0 : i64 to index
%c0 = arith.constant 0 : index
%6 = arith.remsi %c128, %5 : index
%7 = arith.cmpi eq, %c0, %6 : index
cf.assert %7, "invalid: groups must divide input channel size evenly."
%8 = arith.remsi %c257, %5 : index
%9 = arith.cmpi eq, %c0, %8 : index
cf.assert %9, "invalid: groups must divide weight batch size evenly."
%cst_2 = arith.constant 0.000000e+00 : f32
%10 = tensor.empty() : tensor<1x257x4x256xf32>
%broadcasted = linalg.broadcast ins(%4 : tensor<257xf32>) outs(%10 : tensor<1x257x4x256xf32>) dimensions = [0, 2, 3]
%11 = linalg.conv_2d_nchw_fchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%0, %2 : tensor<1x128x4x256xf32>, tensor<257x128x1x1xf32>) outs(%broadcasted : tensor<1x257x4x256xf32>) -> tensor<1x257x4x256xf32>
%12 = torch_c.from_builtin_tensor %11 : tensor<1x257x4x256xf32> -> !torch.vtensor<[1,257,4,256],f32>
return %12 : !torch.vtensor<[1,257,4,256],f32>
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
func.func @torch_jit(%arg0: !torch.vtensor<[1,128,4,256],f32>) -> !torch.vtensor<[1,257,4,256],f32> attributes {torch.assume_strict_symbolic_shapes, torch.onnx_meta.ir_version = 7 : si64, torch.onnx_meta.opset_version = 21 : si64, torch.onnx_meta.producer_name = "pytorch", torch.onnx_meta.producer_version = "1.12.1"} {
%cst = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%0 = torch_c.to_builtin_tensor %arg0 : !torch.vtensor<[1,128,4,256],f32> -> tensor<1x128x4x256xf32>
%1 = torch_c.from_builtin_tensor %cst_0 : tensor<257x128x1x1xf32> -> !torch.vtensor<[257,128,1,1],f32>
%2 = torch_c.to_builtin_tensor %1 : !torch.vtensor<[257,128,1,1],f32> -> tensor<257x128x1x1xf32>
%3 = torch_c.from_builtin_tensor %cst : tensor<257xf32> -> !torch.vtensor<[257],f32>
%4 = torch_c.to_builtin_tensor %3 : !torch.vtensor<[257],f32> -> tensor<257xf32>
%5 = tensor.empty() : tensor<1x257x4x256xf32>
%broadcasted = linalg.broadcast ins(%4 : tensor<257xf32>) outs(%5 : tensor<1x257x4x256xf32>) dimensions = [0, 2, 3]
%6 = linalg.conv_2d_nchw_fchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%0, %2 : tensor<1x128x4x256xf32>, tensor<257x128x1x1xf32>) outs(%broadcasted : tensor<1x257x4x256xf32>) -> tensor<1x257x4x256xf32>
%7 = torch_c.from_builtin_tensor %6 : tensor<1x257x4x256xf32> -> !torch.vtensor<[1,257,4,256],f32>
return %7 : !torch.vtensor<[1,257,4,256],f32>
}
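// NOTE: canonicalization folds the group-divisibility checks away: with the
// group count a constant 1, 128 mod 1 and 257 mod 1 fold to 0, the cmpi
// results fold to true, and the cf.assert ops plus the now-dead arith
// constants are erased. Only the torch_c casts, the bias broadcast, and the
// convolution remain.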
// -----// IR Dump After ResolveShapedTypeResultDims (resolve-shaped-type-result-dims) //----- //
func.func @torch_jit(%arg0: !torch.vtensor<[1,128,4,256],f32>) -> !torch.vtensor<[1,257,4,256],f32> attributes {torch.assume_strict_symbolic_shapes, torch.onnx_meta.ir_version = 7 : si64, torch.onnx_meta.opset_version = 21 : si64, torch.onnx_meta.producer_name = "pytorch", torch.onnx_meta.producer_version = "1.12.1"} {
%cst = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%0 = torch_c.to_builtin_tensor %arg0 : !torch.vtensor<[1,128,4,256],f32> -> tensor<1x128x4x256xf32>
%1 = torch_c.from_builtin_tensor %cst_0 : tensor<257x128x1x1xf32> -> !torch.vtensor<[257,128,1,1],f32>
%2 = torch_c.to_builtin_tensor %1 : !torch.vtensor<[257,128,1,1],f32> -> tensor<257x128x1x1xf32>
%3 = torch_c.from_builtin_tensor %cst : tensor<257xf32> -> !torch.vtensor<[257],f32>
%4 = torch_c.to_builtin_tensor %3 : !torch.vtensor<[257],f32> -> tensor<257xf32>
%5 = tensor.empty() : tensor<1x257x4x256xf32>
%broadcasted = linalg.broadcast ins(%4 : tensor<257xf32>) outs(%5 : tensor<1x257x4x256xf32>) dimensions = [0, 2, 3]
%6 = linalg.conv_2d_nchw_fchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%0, %2 : tensor<1x128x4x256xf32>, tensor<257x128x1x1xf32>) outs(%broadcasted : tensor<1x257x4x256xf32>) -> tensor<1x257x4x256xf32>
%7 = torch_c.from_builtin_tensor %6 : tensor<1x257x4x256xf32> -> !torch.vtensor<[1,257,4,256],f32>
return %7 : !torch.vtensor<[1,257,4,256],f32>
}
// -----// IR Dump After CSE (cse) //----- //
func.func @torch_jit(%arg0: !torch.vtensor<[1,128,4,256],f32>) -> !torch.vtensor<[1,257,4,256],f32> attributes {torch.assume_strict_symbolic_shapes, torch.onnx_meta.ir_version = 7 : si64, torch.onnx_meta.opset_version = 21 : si64, torch.onnx_meta.producer_name = "pytorch", torch.onnx_meta.producer_version = "1.12.1"} {
%cst = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%0 = torch_c.to_builtin_tensor %arg0 : !torch.vtensor<[1,128,4,256],f32> -> tensor<1x128x4x256xf32>
%1 = torch_c.from_builtin_tensor %cst_0 : tensor<257x128x1x1xf32> -> !torch.vtensor<[257,128,1,1],f32>
%2 = torch_c.to_builtin_tensor %1 : !torch.vtensor<[257,128,1,1],f32> -> tensor<257x128x1x1xf32>
%3 = torch_c.from_builtin_tensor %cst : tensor<257xf32> -> !torch.vtensor<[257],f32>
%4 = torch_c.to_builtin_tensor %3 : !torch.vtensor<[257],f32> -> tensor<257xf32>
%5 = tensor.empty() : tensor<1x257x4x256xf32>
%broadcasted = linalg.broadcast ins(%4 : tensor<257xf32>) outs(%5 : tensor<1x257x4x256xf32>) dimensions = [0, 2, 3]
%6 = linalg.conv_2d_nchw_fchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%0, %2 : tensor<1x128x4x256xf32>, tensor<257x128x1x1xf32>) outs(%broadcasted : tensor<1x257x4x256xf32>) -> tensor<1x257x4x256xf32>
%7 = torch_c.from_builtin_tensor %6 : tensor<1x257x4x256xf32> -> !torch.vtensor<[1,257,4,256],f32>
return %7 : !torch.vtensor<[1,257,4,256],f32>
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
func.func @torch_jit(%arg0: !torch.vtensor<[1,128,4,256],f32>) -> !torch.vtensor<[1,257,4,256],f32> attributes {torch.assume_strict_symbolic_shapes, torch.onnx_meta.ir_version = 7 : si64, torch.onnx_meta.opset_version = 21 : si64, torch.onnx_meta.producer_name = "pytorch", torch.onnx_meta.producer_version = "1.12.1"} {
%cst = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%0 = torch_c.to_builtin_tensor %arg0 : !torch.vtensor<[1,128,4,256],f32> -> tensor<1x128x4x256xf32>
%1 = torch_c.from_builtin_tensor %cst_0 : tensor<257x128x1x1xf32> -> !torch.vtensor<[257,128,1,1],f32>
%2 = torch_c.to_builtin_tensor %1 : !torch.vtensor<[257,128,1,1],f32> -> tensor<257x128x1x1xf32>
%3 = torch_c.from_builtin_tensor %cst : tensor<257xf32> -> !torch.vtensor<[257],f32>
%4 = torch_c.to_builtin_tensor %3 : !torch.vtensor<[257],f32> -> tensor<257xf32>
%5 = tensor.empty() : tensor<1x257x4x256xf32>
%broadcasted = linalg.broadcast ins(%4 : tensor<257xf32>) outs(%5 : tensor<1x257x4x256xf32>) dimensions = [0, 2, 3]
%6 = linalg.conv_2d_nchw_fchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%0, %2 : tensor<1x128x4x256xf32>, tensor<257x128x1x1xf32>) outs(%broadcasted : tensor<1x257x4x256xf32>) -> tensor<1x257x4x256xf32>
%7 = torch_c.from_builtin_tensor %6 : tensor<1x257x4x256xf32> -> !torch.vtensor<[1,257,4,256],f32>
return %7 : !torch.vtensor<[1,257,4,256],f32>
}
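// NOTE: ResolveShapedTypeResultDims, CSE, and this second canonicalization
// leave the IR unchanged; all shapes are static and there is nothing left to
// common out, so the three dumps above are identical.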
// -----// IR Dump After Inliner (inline) //----- //
module {
ml_program.global private mutable @global_seed(dense<0> : tensor<i64>) : tensor<i64>
func.func @torch_jit(%arg0: !torch.vtensor<[1,128,4,256],f32>) -> !torch.vtensor<[1,257,4,256],f32> attributes {torch.assume_strict_symbolic_shapes, torch.onnx_meta.ir_version = 7 : si64, torch.onnx_meta.opset_version = 21 : si64, torch.onnx_meta.producer_name = "pytorch", torch.onnx_meta.producer_version = "1.12.1"} {
%cst = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%0 = torch_c.to_builtin_tensor %arg0 : !torch.vtensor<[1,128,4,256],f32> -> tensor<1x128x4x256xf32>
%1 = torch_c.from_builtin_tensor %cst_0 : tensor<257x128x1x1xf32> -> !torch.vtensor<[257,128,1,1],f32>
%2 = torch_c.to_builtin_tensor %1 : !torch.vtensor<[257,128,1,1],f32> -> tensor<257x128x1x1xf32>
%3 = torch_c.from_builtin_tensor %cst : tensor<257xf32> -> !torch.vtensor<[257],f32>
%4 = torch_c.to_builtin_tensor %3 : !torch.vtensor<[257],f32> -> tensor<257xf32>
%5 = tensor.empty() : tensor<1x257x4x256xf32>
%broadcasted = linalg.broadcast ins(%4 : tensor<257xf32>) outs(%5 : tensor<1x257x4x256xf32>) dimensions = [0, 2, 3]
%6 = linalg.conv_2d_nchw_fchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%0, %2 : tensor<1x128x4x256xf32>, tensor<257x128x1x1xf32>) outs(%broadcasted : tensor<1x257x4x256xf32>) -> tensor<1x257x4x256xf32>
%7 = torch_c.from_builtin_tensor %6 : tensor<1x257x4x256xf32> -> !torch.vtensor<[1,257,4,256],f32>
return %7 : !torch.vtensor<[1,257,4,256],f32>
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000"
}
}
#-}
// -----// IR Dump After FuncConversionPass (torch-iree-func-conversion) //----- //
module {
ml_program.global private mutable @global_seed(dense<0> : tensor<i64>) : tensor<i64>
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = torch_c.from_builtin_tensor %0 : tensor<1x128x4x256xf32> -> !torch.vtensor<[1,128,4,256],f32>
%cst = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%2 = torch_c.to_builtin_tensor %1 : !torch.vtensor<[1,128,4,256],f32> -> tensor<1x128x4x256xf32>
%3 = torch_c.from_builtin_tensor %cst_0 : tensor<257x128x1x1xf32> -> !torch.vtensor<[257,128,1,1],f32>
%4 = torch_c.to_builtin_tensor %3 : !torch.vtensor<[257,128,1,1],f32> -> tensor<257x128x1x1xf32>
%5 = torch_c.from_builtin_tensor %cst : tensor<257xf32> -> !torch.vtensor<[257],f32>
%6 = torch_c.to_builtin_tensor %5 : !torch.vtensor<[257],f32> -> tensor<257xf32>
%7 = tensor.empty() : tensor<1x257x4x256xf32>
%broadcasted = linalg.broadcast ins(%6 : tensor<257xf32>) outs(%7 : tensor<1x257x4x256xf32>) dimensions = [0, 2, 3]
%8 = linalg.conv_2d_nchw_fchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%2, %4 : tensor<1x128x4x256xf32>, tensor<257x128x1x1xf32>) outs(%broadcasted : tensor<1x257x4x256xf32>) -> tensor<1x257x4x256xf32>
%9 = torch_c.from_builtin_tensor %8 : tensor<1x257x4x256xf32> -> !torch.vtensor<[1,257,4,256],f32>
%10 = hal.tensor.barrier join(%8 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%11 = hal.tensor.export %10 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %11 : !hal.buffer_view
}
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%0 = util.null : !hal.fence
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000"
}
}
#-}
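// NOTE: FuncConversionPass rewrites the entry point for the IREE ABI. The
// work now lives in util.func @torch_jit$async (coarse-fences model): the
// input !hal.buffer_view is imported after waiting on the %arg1 fence, the
// result is joined to the %arg2 fence with hal.tensor.barrier and exported
// back to a !hal.buffer_view. The synchronous @torch_jit wrapper creates a
// fence, calls the async variant, and blocks on hal.fence.await.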
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = torch_c.from_builtin_tensor %0 : tensor<1x128x4x256xf32> -> !torch.vtensor<[1,128,4,256],f32>
%2 = torch_c.to_builtin_tensor %1 : !torch.vtensor<[1,128,4,256],f32> -> tensor<1x128x4x256xf32>
%3 = torch_c.from_builtin_tensor %cst : tensor<257x128x1x1xf32> -> !torch.vtensor<[257,128,1,1],f32>
%4 = torch_c.to_builtin_tensor %3 : !torch.vtensor<[257,128,1,1],f32> -> tensor<257x128x1x1xf32>
%5 = torch_c.from_builtin_tensor %cst_0 : tensor<257xf32> -> !torch.vtensor<[257],f32>
%6 = torch_c.to_builtin_tensor %5 : !torch.vtensor<[257],f32> -> tensor<257xf32>
%7 = tensor.empty() : tensor<1x257x4x256xf32>
%broadcasted = linalg.broadcast ins(%6 : tensor<257xf32>) outs(%7 : tensor<1x257x4x256xf32>) dimensions = [0, 2, 3]
%8 = linalg.conv_2d_nchw_fchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%2, %4 : tensor<1x128x4x256xf32>, tensor<257x128x1x1xf32>) outs(%broadcasted : tensor<1x257x4x256xf32>) -> tensor<1x257x4x256xf32>
%9 = hal.tensor.barrier join(%8 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%10 = hal.tensor.export %9 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %10 : !hal.buffer_view
}
// -----// IR Dump After SymbolDCE (symbol-dce) //----- //
module {
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = torch_c.from_builtin_tensor %0 : tensor<1x128x4x256xf32> -> !torch.vtensor<[1,128,4,256],f32>
%2 = torch_c.to_builtin_tensor %1 : !torch.vtensor<[1,128,4,256],f32> -> tensor<1x128x4x256xf32>
%3 = torch_c.from_builtin_tensor %cst : tensor<257x128x1x1xf32> -> !torch.vtensor<[257,128,1,1],f32>
%4 = torch_c.to_builtin_tensor %3 : !torch.vtensor<[257,128,1,1],f32> -> tensor<257x128x1x1xf32>
%5 = torch_c.from_builtin_tensor %cst_0 : tensor<257xf32> -> !torch.vtensor<[257],f32>
%6 = torch_c.to_builtin_tensor %5 : !torch.vtensor<[257],f32> -> tensor<257xf32>
%7 = tensor.empty() : tensor<1x257x4x256xf32>
%broadcasted = linalg.broadcast ins(%6 : tensor<257xf32>) outs(%7 : tensor<1x257x4x256xf32>) dimensions = [0, 2, 3]
%8 = linalg.conv_2d_nchw_fchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%2, %4 : tensor<1x128x4x256xf32>, tensor<257x128x1x1xf32>) outs(%broadcasted : tensor<1x257x4x256xf32>) -> tensor<1x257x4x256xf32>
%9 = hal.tensor.barrier join(%8 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%10 = hal.tensor.export %9 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %10 : !hal.buffer_view
}
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
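// NOTE: SymbolDCE drops the unused ml_program.global @global_seed; nothing in
// the program reads or updates the seed, so only the two util.funcs remain in
// the module.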
// -----// IR Dump After FinalizingBackendTypeConversion (torch-finalizing-backend-type-conversion) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After FinalizingBackendTypeConversion (torch-finalizing-backend-type-conversion) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<1x257x4x256xf32>
%broadcasted = linalg.broadcast ins(%cst_0 : tensor<257xf32>) outs(%1 : tensor<1x257x4x256xf32>) dimensions = [0, 2, 3]
%2 = linalg.conv_2d_nchw_fchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%0, %cst : tensor<1x128x4x256xf32>, tensor<257x128x1x1xf32>) outs(%broadcasted : tensor<1x257x4x256xf32>) -> tensor<1x257x4x256xf32>
%3 = hal.tensor.barrier join(%2 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%4 = hal.tensor.export %3 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
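// NOTE: FinalizingBackendTypeConversion erases the remaining
// torch_c.to_builtin_tensor / torch_c.from_builtin_tensor round-trips, so the
// broadcast and convolution now consume the imported tensor and the constant
// weights directly; no torch dialect types are left in the program.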
// -----// IR Dump After AutoInputConversionPipelinePass (iree-auto-input-conversion) //----- //
module {
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<1x257x4x256xf32>
%broadcasted = linalg.broadcast ins(%cst_0 : tensor<257xf32>) outs(%1 : tensor<1x257x4x256xf32>) dimensions = [0, 2, 3]
%2 = linalg.conv_2d_nchw_fchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%0, %cst : tensor<1x128x4x256xf32>, tensor<257x128x1x1xf32>) outs(%broadcasted : tensor<1x257x4x256xf32>) -> tensor<1x257x4x256xf32>
%3 = hal.tensor.barrier join(%2 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%4 = hal.tensor.export %3 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
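// NOTE: the input-conversion and ABI passes that follow (IREEImportPublicPass,
// ImportMLProgramPass, SanitizeModuleNamesPass, ConvertMeshToFlowPass,
// DemoteF64ToF32Pass, ConvertStreamableOpsPass, WrapEntryPointsPass, and the
// interleaved canonicalize/CSE/inline/SymbolDCE runs) find nothing to change
// in this module, so their dumps below are identical to the one above.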
// -----// IR Dump After IREEImportPublicPass (iree-import-public) //----- //
module {
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<1x257x4x256xf32>
%broadcasted = linalg.broadcast ins(%cst_0 : tensor<257xf32>) outs(%1 : tensor<1x257x4x256xf32>) dimensions = [0, 2, 3]
%2 = linalg.conv_2d_nchw_fchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%0, %cst : tensor<1x128x4x256xf32>, tensor<257x128x1x1xf32>) outs(%broadcasted : tensor<1x257x4x256xf32>) -> tensor<1x257x4x256xf32>
%3 = hal.tensor.barrier join(%2 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%4 = hal.tensor.export %3 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
// -----// IR Dump After ImportMLProgramPass (iree-import-ml-program) //----- //
module {
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<1x257x4x256xf32>
%broadcasted = linalg.broadcast ins(%cst_0 : tensor<257xf32>) outs(%1 : tensor<1x257x4x256xf32>) dimensions = [0, 2, 3]
%2 = linalg.conv_2d_nchw_fchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%0, %cst : tensor<1x128x4x256xf32>, tensor<257x128x1x1xf32>) outs(%broadcasted : tensor<1x257x4x256xf32>) -> tensor<1x257x4x256xf32>
%3 = hal.tensor.barrier join(%2 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%4 = hal.tensor.export %3 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
// -----// IR Dump After SanitizeModuleNamesPass (iree-sanitize-module-names) //----- //
module {
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<1x257x4x256xf32>
%broadcasted = linalg.broadcast ins(%cst_0 : tensor<257xf32>) outs(%1 : tensor<1x257x4x256xf32>) dimensions = [0, 2, 3]
%2 = linalg.conv_2d_nchw_fchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%0, %cst : tensor<1x128x4x256xf32>, tensor<257x128x1x1xf32>) outs(%broadcasted : tensor<1x257x4x256xf32>) -> tensor<1x257x4x256xf32>
%3 = hal.tensor.barrier join(%2 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%4 = hal.tensor.export %3 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
// -----// IR Dump After ConvertMeshToFlowPass (iree-convert-mesh-to-flow) //----- //
module {
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<1x257x4x256xf32>
%broadcasted = linalg.broadcast ins(%cst_0 : tensor<257xf32>) outs(%1 : tensor<1x257x4x256xf32>) dimensions = [0, 2, 3]
%2 = linalg.conv_2d_nchw_fchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%0, %cst : tensor<1x128x4x256xf32>, tensor<257x128x1x1xf32>) outs(%broadcasted : tensor<1x257x4x256xf32>) -> tensor<1x257x4x256xf32>
%3 = hal.tensor.barrier join(%2 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%4 = hal.tensor.export %3 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
// -----// IR Dump After DemoteF64ToF32Pass (iree-input-conversion-demote-f64-to-f32) //----- //
module {
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<1x257x4x256xf32>
%broadcasted = linalg.broadcast ins(%cst_0 : tensor<257xf32>) outs(%1 : tensor<1x257x4x256xf32>) dimensions = [0, 2, 3]
%2 = linalg.conv_2d_nchw_fchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%0, %cst : tensor<1x128x4x256xf32>, tensor<257x128x1x1xf32>) outs(%broadcasted : tensor<1x257x4x256xf32>) -> tensor<1x257x4x256xf32>
%3 = hal.tensor.barrier join(%2 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%4 = hal.tensor.export %3 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
// -----// IR Dump After mlir::iree_compiler::IREE::ABI::ConvertStreamableOpsPass (iree-abi-convert-streamable-ops) //----- //
module {
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<1x257x4x256xf32>
%broadcasted = linalg.broadcast ins(%cst_0 : tensor<257xf32>) outs(%1 : tensor<1x257x4x256xf32>) dimensions = [0, 2, 3]
%2 = linalg.conv_2d_nchw_fchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%0, %cst : tensor<1x128x4x256xf32>, tensor<257x128x1x1xf32>) outs(%broadcasted : tensor<1x257x4x256xf32>) -> tensor<1x257x4x256xf32>
%3 = hal.tensor.barrier join(%2 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%4 = hal.tensor.export %3 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
// -----// IR Dump After mlir::iree_compiler::IREE::ABI::WrapEntryPointsPass (iree-abi-wrap-entry-points) //----- //
module {
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<1x257x4x256xf32>
%broadcasted = linalg.broadcast ins(%cst_0 : tensor<257xf32>) outs(%1 : tensor<1x257x4x256xf32>) dimensions = [0, 2, 3]
%2 = linalg.conv_2d_nchw_fchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%0, %cst : tensor<1x128x4x256xf32>, tensor<257x128x1x1xf32>) outs(%broadcasted : tensor<1x257x4x256xf32>) -> tensor<1x257x4x256xf32>
%3 = hal.tensor.barrier join(%2 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%4 = hal.tensor.export %3 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<1x257x4x256xf32>
%broadcasted = linalg.broadcast ins(%cst_0 : tensor<257xf32>) outs(%1 : tensor<1x257x4x256xf32>) dimensions = [0, 2, 3]
%2 = linalg.conv_2d_nchw_fchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%0, %cst : tensor<1x128x4x256xf32>, tensor<257x128x1x1xf32>) outs(%broadcasted : tensor<1x257x4x256xf32>) -> tensor<1x257x4x256xf32>
%3 = hal.tensor.barrier join(%2 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%4 = hal.tensor.export %3 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After Inliner (inline) //----- //
module {
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<1x257x4x256xf32>
%broadcasted = linalg.broadcast ins(%cst_0 : tensor<257xf32>) outs(%1 : tensor<1x257x4x256xf32>) dimensions = [0, 2, 3]
%2 = linalg.conv_2d_nchw_fchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%0, %cst : tensor<1x128x4x256xf32>, tensor<257x128x1x1xf32>) outs(%broadcasted : tensor<1x257x4x256xf32>) -> tensor<1x257x4x256xf32>
%3 = hal.tensor.barrier join(%2 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%4 = hal.tensor.export %3 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<1x257x4x256xf32>
%broadcasted = linalg.broadcast ins(%cst_0 : tensor<257xf32>) outs(%1 : tensor<1x257x4x256xf32>) dimensions = [0, 2, 3]
%2 = linalg.conv_2d_nchw_fchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%0, %cst : tensor<1x128x4x256xf32>, tensor<257x128x1x1xf32>) outs(%broadcasted : tensor<1x257x4x256xf32>) -> tensor<1x257x4x256xf32>
%3 = hal.tensor.barrier join(%2 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%4 = hal.tensor.export %3 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<1x257x4x256xf32>
%broadcasted = linalg.broadcast ins(%cst_0 : tensor<257xf32>) outs(%1 : tensor<1x257x4x256xf32>) dimensions = [0, 2, 3]
%2 = linalg.conv_2d_nchw_fchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%0, %cst : tensor<1x128x4x256xf32>, tensor<257x128x1x1xf32>) outs(%broadcasted : tensor<1x257x4x256xf32>) -> tensor<1x257x4x256xf32>
%3 = hal.tensor.barrier join(%2 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%4 = hal.tensor.export %3 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After SymbolDCE (symbol-dce) //----- //
module {
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<1x257x4x256xf32>
%broadcasted = linalg.broadcast ins(%cst_0 : tensor<257xf32>) outs(%1 : tensor<1x257x4x256xf32>) dimensions = [0, 2, 3]
%2 = linalg.conv_2d_nchw_fchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%0, %cst : tensor<1x128x4x256xf32>, tensor<257x128x1x1xf32>) outs(%broadcasted : tensor<1x257x4x256xf32>) -> tensor<1x257x4x256xf32>
%3 = hal.tensor.barrier join(%2 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%4 = hal.tensor.export %3 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
// -----// IR Dump After AssignLegacyTargetDevicesPass (iree-hal-assign-legacy-target-devices) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {hal.device.targets = [#device_target_hip]} {
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<1x257x4x256xf32>
%broadcasted = linalg.broadcast ins(%cst_0 : tensor<257xf32>) outs(%1 : tensor<1x257x4x256xf32>) dimensions = [0, 2, 3]
%2 = linalg.conv_2d_nchw_fchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%0, %cst : tensor<1x128x4x256xf32>, tensor<257x128x1x1xf32>) outs(%broadcasted : tensor<1x257x4x256xf32>) -> tensor<1x257x4x256xf32>
%3 = hal.tensor.barrier join(%2 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%4 = hal.tensor.export %3 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
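// NOTE: AssignLegacyTargetDevicesPass attaches the compilation target as a
// module attribute: a hip device (#device_target_hip) backed by a ROCm
// executable target for gfx942 (rocm-hsaco-fb), whose MFMA intrinsics and
// workgroup limits are advertised for later codegen.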
// -----// IR Dump After MaterializeTargetDevicesPass (iree-hal-materialize-target-devices) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<1x257x4x256xf32>
%broadcasted = linalg.broadcast ins(%cst_0 : tensor<257xf32>) outs(%1 : tensor<1x257x4x256xf32>) dimensions = [0, 2, 3]
%2 = linalg.conv_2d_nchw_fchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%0, %cst : tensor<1x128x4x256xf32>, tensor<257x128x1x1xf32>) outs(%broadcasted : tensor<1x257x4x256xf32>) -> tensor<1x257x4x256xf32>
%3 = hal.tensor.barrier join(%2 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%4 = hal.tensor.export %3 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
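// NOTE: MaterializeTargetDevicesPass moves the target out of the
// hal.device.targets module attribute into a util.global @__device_0 and sets
// stream.affinity.default to that device; the functions themselves are
// unchanged.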
// -----// IR Dump After ResolveDevicePromisesPass (iree-hal-resolve-device-promises) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<1x257x4x256xf32>
%broadcasted = linalg.broadcast ins(%cst_0 : tensor<257xf32>) outs(%1 : tensor<1x257x4x256xf32>) dimensions = [0, 2, 3]
%2 = linalg.conv_2d_nchw_fchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%0, %cst : tensor<1x128x4x256xf32>, tensor<257x128x1x1xf32>) outs(%broadcasted : tensor<1x257x4x256xf32>) -> tensor<1x257x4x256xf32>
%3 = hal.tensor.barrier join(%2 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%4 = hal.tensor.export %3 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
// -----// IR Dump After ResolveDeviceAliasesPass (iree-hal-resolve-device-aliases) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<1x257x4x256xf32>
%broadcasted = linalg.broadcast ins(%cst_0 : tensor<257xf32>) outs(%1 : tensor<1x257x4x256xf32>) dimensions = [0, 2, 3]
%2 = linalg.conv_2d_nchw_fchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%0, %cst : tensor<1x128x4x256xf32>, tensor<257x128x1x1xf32>) outs(%broadcasted : tensor<1x257x4x256xf32>) -> tensor<1x257x4x256xf32>
%3 = hal.tensor.barrier join(%2 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%4 = hal.tensor.export %3 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
// -----// IR Dump After VerifyDevicesPass (iree-hal-verify-devices) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<1x257x4x256xf32>
%broadcasted = linalg.broadcast ins(%cst_0 : tensor<257xf32>) outs(%1 : tensor<1x257x4x256xf32>) dimensions = [0, 2, 3]
%2 = linalg.conv_2d_nchw_fchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%0, %cst : tensor<1x128x4x256xf32>, tensor<257x128x1x1xf32>) outs(%broadcasted : tensor<1x257x4x256xf32>) -> tensor<1x257x4x256xf32>
%3 = hal.tensor.barrier join(%2 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%4 = hal.tensor.export %3 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
// -----// IR Dump After OptimizeIntArithmeticPass (iree-util-optimize-int-arithmetic) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After OptimizeIntArithmeticPass (iree-util-optimize-int-arithmetic) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<1x257x4x256xf32>
%broadcasted = linalg.broadcast ins(%cst_0 : tensor<257xf32>) outs(%1 : tensor<1x257x4x256xf32>) dimensions = [0, 2, 3]
%2 = linalg.conv_2d_nchw_fchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%0, %cst : tensor<1x128x4x256xf32>, tensor<257x128x1x1xf32>) outs(%broadcasted : tensor<1x257x4x256xf32>) -> tensor<1x257x4x256xf32>
%3 = hal.tensor.barrier join(%2 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%4 = hal.tensor.export %3 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After LinalgQuantizedConvToConvPass (iree-global-opt-quantized-conv-to-conv) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After LinalgQuantizedConvToConvPass (iree-global-opt-quantized-conv-to-conv) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<1x257x4x256xf32>
%broadcasted = linalg.broadcast ins(%cst_0 : tensor<257xf32>) outs(%1 : tensor<1x257x4x256xf32>) dimensions = [0, 2, 3]
%2 = linalg.conv_2d_nchw_fchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%0, %cst : tensor<1x128x4x256xf32>, tensor<257x128x1x1xf32>) outs(%broadcasted : tensor<1x257x4x256xf32>) -> tensor<1x257x4x256xf32>
%3 = hal.tensor.barrier join(%2 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%4 = hal.tensor.export %3 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After LinalgQuantizedMatmulToMatmulPass (iree-global-opt-quantized-matmul-to-matmul) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After LinalgQuantizedMatmulToMatmulPass (iree-global-opt-quantized-matmul-to-matmul) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<1x257x4x256xf32>
%broadcasted = linalg.broadcast ins(%cst_0 : tensor<257xf32>) outs(%1 : tensor<1x257x4x256xf32>) dimensions = [0, 2, 3]
%2 = linalg.conv_2d_nchw_fchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%0, %cst : tensor<1x128x4x256xf32>, tensor<257x128x1x1xf32>) outs(%broadcasted : tensor<1x257x4x256xf32>) -> tensor<1x257x4x256xf32>
%3 = hal.tensor.barrier join(%2 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%4 = hal.tensor.export %3 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<1x257x4x256xf32>
%broadcasted = linalg.broadcast ins(%cst_0 : tensor<257xf32>) outs(%1 : tensor<1x257x4x256xf32>) dimensions = [0, 2, 3]
%2 = linalg.conv_2d_nchw_fchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%0, %cst : tensor<1x128x4x256xf32>, tensor<257x128x1x1xf32>) outs(%broadcasted : tensor<1x257x4x256xf32>) -> tensor<1x257x4x256xf32>
%3 = hal.tensor.barrier join(%2 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%4 = hal.tensor.export %3 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After RemoveZeroExtentTensorsPass (iree-global-opt-remove-zero-extent-tensors) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After RemoveZeroExtentTensorsPass (iree-global-opt-remove-zero-extent-tensors) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<1x257x4x256xf32>
%broadcasted = linalg.broadcast ins(%cst_0 : tensor<257xf32>) outs(%1 : tensor<1x257x4x256xf32>) dimensions = [0, 2, 3]
%2 = linalg.conv_2d_nchw_fchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%0, %cst : tensor<1x128x4x256xf32>, tensor<257x128x1x1xf32>) outs(%broadcasted : tensor<1x257x4x256xf32>) -> tensor<1x257x4x256xf32>
%3 = hal.tensor.barrier join(%2 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%4 = hal.tensor.export %3 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After DetachElementwiseFromNamedOpsPass (iree-global-opt-detach-elementwise-from-named-ops) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After LinalgNamedOpConversionPass (linalg-named-op-conversion) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After DetachElementwiseFromNamedOpsPass (iree-global-opt-detach-elementwise-from-named-ops) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<1x257x4x256xf32>
%broadcasted = linalg.broadcast ins(%cst_1 : tensor<257xf32>) outs(%1 : tensor<1x257x4x256xf32>) dimensions = [0, 2, 3]
%2 = tensor.empty() : tensor<1x257x4x256xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x257x4x256xf32>) -> tensor<1x257x4x256xf32>
%4 = linalg.conv_2d_nchw_fchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%0, %cst_0 : tensor<1x128x4x256xf32>, tensor<257x128x1x1xf32>) outs(%3 : tensor<1x257x4x256xf32>) -> tensor<1x257x4x256xf32>
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%4, %broadcasted : tensor<1x257x4x256xf32>, tensor<1x257x4x256xf32>) outs(%3 : tensor<1x257x4x256xf32>) {
^bb0(%in: f32, %in_2: f32, %out: f32):
%8 = arith.addf %in, %in_2 : f32
linalg.yield %8 : f32
} -> tensor<1x257x4x256xf32>
%6 = hal.tensor.barrier join(%5 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%7 = hal.tensor.export %6 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
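// [editor's annotation, not compiler output] Relative to the preceding dumps, DetachElementwiseFromNamedOpsPass
// stopped using the broadcast bias as the convolution's accumulator: the conv now writes into a zero-filled
// tensor (%3) and the bias broadcast is re-added afterwards by the trailing elementwise addf generic (%5).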
// -----// IR Dump After LinalgNamedOpConversionPass (linalg-named-op-conversion) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<1x257x4x256xf32>
%broadcasted = linalg.broadcast ins(%cst_1 : tensor<257xf32>) outs(%1 : tensor<1x257x4x256xf32>) dimensions = [0, 2, 3]
%2 = tensor.empty() : tensor<1x257x4x256xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x257x4x256xf32>) -> tensor<1x257x4x256xf32>
%4 = linalg.conv_2d_nchw_fchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%0, %cst_0 : tensor<1x128x4x256xf32>, tensor<257x128x1x1xf32>) outs(%3 : tensor<1x257x4x256xf32>) -> tensor<1x257x4x256xf32>
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%4, %broadcasted : tensor<1x257x4x256xf32>, tensor<1x257x4x256xf32>) outs(%3 : tensor<1x257x4x256xf32>) {
^bb0(%in: f32, %in_2: f32, %out: f32):
%8 = arith.addf %in, %in_2 : f32
linalg.yield %8 : f32
} -> tensor<1x257x4x256xf32>
%6 = hal.tensor.barrier join(%5 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%7 = hal.tensor.export %6 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After EraseUnusedLinalgOperandsPass (iree-global-opt-erase-unused-linalg-operands) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<1x257x4x256xf32>
%broadcasted = linalg.broadcast ins(%cst_1 : tensor<257xf32>) outs(%1 : tensor<1x257x4x256xf32>) dimensions = [0, 2, 3]
%2 = tensor.empty() : tensor<1x257x4x256xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x257x4x256xf32>) -> tensor<1x257x4x256xf32>
%4 = linalg.conv_2d_nchw_fchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%0, %cst_0 : tensor<1x128x4x256xf32>, tensor<257x128x1x1xf32>) outs(%3 : tensor<1x257x4x256xf32>) -> tensor<1x257x4x256xf32>
%5 = linalg.generic {indexing_maps = [#map, #map, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%4, %broadcasted : tensor<1x257x4x256xf32>, tensor<1x257x4x256xf32>) outs(%3 : tensor<1x257x4x256xf32>) {
^bb0(%in: f32, %in_2: f32, %out: f32):
%8 = arith.addf %in, %in_2 : f32
linalg.yield %8 : f32
} -> tensor<1x257x4x256xf32>
%6 = hal.tensor.barrier join(%5 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%7 = hal.tensor.export %6 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
// -----// IR Dump After ExpandTensorShapesPass (iree-global-opt-expand-tensor-shapes) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<1x257x4x256xf32>
%broadcasted = linalg.broadcast ins(%cst_1 : tensor<257xf32>) outs(%1 : tensor<1x257x4x256xf32>) dimensions = [0, 2, 3]
%2 = tensor.empty() : tensor<1x257x4x256xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x257x4x256xf32>) -> tensor<1x257x4x256xf32>
%4 = linalg.conv_2d_nchw_fchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%0, %cst_0 : tensor<1x128x4x256xf32>, tensor<257x128x1x1xf32>) outs(%3 : tensor<1x257x4x256xf32>) -> tensor<1x257x4x256xf32>
%5 = linalg.generic {indexing_maps = [#map, #map, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%4, %broadcasted : tensor<1x257x4x256xf32>, tensor<1x257x4x256xf32>) outs(%3 : tensor<1x257x4x256xf32>) {
^bb0(%in: f32, %in_2: f32, %out: f32):
%8 = arith.addf %in, %in_2 : f32
linalg.yield %8 : f32
} -> tensor<1x257x4x256xf32>
%6 = hal.tensor.barrier join(%5 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%7 = hal.tensor.export %6 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
// -----// IR Dump After ConvertElementwiseToLinalgPass (convert-elementwise-to-linalg) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After ConvertElementwiseToLinalgPass (convert-elementwise-to-linalg) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<1x257x4x256xf32>
%broadcasted = linalg.broadcast ins(%cst_1 : tensor<257xf32>) outs(%1 : tensor<1x257x4x256xf32>) dimensions = [0, 2, 3]
%2 = tensor.empty() : tensor<1x257x4x256xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x257x4x256xf32>) -> tensor<1x257x4x256xf32>
%4 = linalg.conv_2d_nchw_fchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%0, %cst_0 : tensor<1x128x4x256xf32>, tensor<257x128x1x1xf32>) outs(%3 : tensor<1x257x4x256xf32>) -> tensor<1x257x4x256xf32>
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%4, %broadcasted : tensor<1x257x4x256xf32>, tensor<1x257x4x256xf32>) outs(%3 : tensor<1x257x4x256xf32>) {
^bb0(%in: f32, %in_2: f32, %out: f32):
%8 = arith.addf %in, %in_2 : f32
linalg.yield %8 : f32
} -> tensor<1x257x4x256xf32>
%6 = hal.tensor.barrier join(%5 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%7 = hal.tensor.export %6 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After RaiseSpecialOpsPass (iree-global-opt-raise-special-ops) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After DecomposeConcatPass (iree-global-opt-decompose-concat) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After GeneralizeLinalgNamedOpsPass (iree-global-opt-generalize-linalg-named-ops) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After RaiseSpecialOpsPass (iree-global-opt-raise-special-ops) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<1x257x4x256xf32>
%broadcasted = linalg.broadcast ins(%cst_1 : tensor<257xf32>) outs(%1 : tensor<1x257x4x256xf32>) dimensions = [0, 2, 3]
%2 = tensor.empty() : tensor<1x257x4x256xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x257x4x256xf32>) -> tensor<1x257x4x256xf32>
%4 = linalg.conv_2d_nchw_fchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%0, %cst_0 : tensor<1x128x4x256xf32>, tensor<257x128x1x1xf32>) outs(%3 : tensor<1x257x4x256xf32>) -> tensor<1x257x4x256xf32>
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%4, %broadcasted : tensor<1x257x4x256xf32>, tensor<1x257x4x256xf32>) outs(%3 : tensor<1x257x4x256xf32>) {
^bb0(%in: f32, %in_2: f32, %out: f32):
%8 = arith.addf %in, %in_2 : f32
linalg.yield %8 : f32
} -> tensor<1x257x4x256xf32>
%6 = hal.tensor.barrier join(%5 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%7 = hal.tensor.export %6 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After DecomposeConcatPass (iree-global-opt-decompose-concat) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<1x257x4x256xf32>
%broadcasted = linalg.broadcast ins(%cst_1 : tensor<257xf32>) outs(%1 : tensor<1x257x4x256xf32>) dimensions = [0, 2, 3]
%2 = tensor.empty() : tensor<1x257x4x256xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x257x4x256xf32>) -> tensor<1x257x4x256xf32>
%4 = linalg.conv_2d_nchw_fchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%0, %cst_0 : tensor<1x128x4x256xf32>, tensor<257x128x1x1xf32>) outs(%3 : tensor<1x257x4x256xf32>) -> tensor<1x257x4x256xf32>
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%4, %broadcasted : tensor<1x257x4x256xf32>, tensor<1x257x4x256xf32>) outs(%3 : tensor<1x257x4x256xf32>) {
^bb0(%in: f32, %in_2: f32, %out: f32):
%8 = arith.addf %in, %in_2 : f32
linalg.yield %8 : f32
} -> tensor<1x257x4x256xf32>
%6 = hal.tensor.barrier join(%5 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%7 = hal.tensor.export %6 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After GeneralizeLinalgNamedOpsPass (iree-global-opt-generalize-linalg-named-ops) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<1x257x4x256xf32>
%2 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d1)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%cst_1 : tensor<257xf32>) outs(%1 : tensor<1x257x4x256xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<1x257x4x256xf32>
%3 = tensor.empty() : tensor<1x257x4x256xf32>
%4 = linalg.fill ins(%cst : f32) outs(%3 : tensor<1x257x4x256xf32>) -> tensor<1x257x4x256xf32>
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d0, d4, d2 + d5, d3 + d6)>, affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d1, d4, d5, d6)>, affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "reduction", "reduction", "reduction"]} ins(%0, %cst_0 : tensor<1x128x4x256xf32>, tensor<257x128x1x1xf32>) outs(%4 : tensor<1x257x4x256xf32>) {
^bb0(%in: f32, %in_2: f32, %out: f32):
%9 = arith.mulf %in, %in_2 : f32
%10 = arith.addf %out, %9 : f32
linalg.yield %10 : f32
} -> tensor<1x257x4x256xf32>
%6 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%5, %2 : tensor<1x257x4x256xf32>, tensor<1x257x4x256xf32>) outs(%4 : tensor<1x257x4x256xf32>) {
^bb0(%in: f32, %in_2: f32, %out: f32):
%9 = arith.addf %in, %in_2 : f32
linalg.yield %9 : f32
} -> tensor<1x257x4x256xf32>
%7 = hal.tensor.barrier join(%6 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%8 = hal.tensor.export %7 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %8 : !hal.buffer_view
}
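// [editor's annotation, not compiler output] GeneralizeLinalgNamedOpsPass rewrote the named linalg.broadcast and
// linalg.conv_2d_nchw_fchw ops as linalg.generic ops with explicit indexing maps; the conv input map
// (d0, d4, d2 + d5, d3 + d6) encodes the NCHW sliding window, with (d4, d5, d6) = (input channel, kh, kw)
// iterated as the reduction dimensions.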
// -----// IR Dump After FoldUnitExtentDimsPass (iree-dispatch-creation-fold-unit-extent-dims) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d0)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#map2 = affine_map<(d0, d1, d2, d3) -> (d3, d1, d2)>
#map3 = affine_map<(d0, d1, d2, d3) -> (d0, d3)>
#map4 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<257x4x256xf32>
%2 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%cst_1 : tensor<257xf32>) outs(%1 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<257x4x256xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2], [3]] : tensor<1x128x4x256xf32> into tensor<128x4x256xf32>
%collapsed_2 = tensor.collapse_shape %cst_0 [[0], [1, 2, 3]] : tensor<257x128x1x1xf32> into tensor<257x128xf32>
%3 = tensor.empty() : tensor<257x4x256xf32>
%4 = linalg.fill ins(%cst : f32) outs(%3 : tensor<257x4x256xf32>) -> tensor<257x4x256xf32>
%5 = linalg.generic {indexing_maps = [#map2, #map3, #map4], iterator_types = ["parallel", "parallel", "parallel", "reduction"]} ins(%collapsed, %collapsed_2 : tensor<128x4x256xf32>, tensor<257x128xf32>) outs(%4 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%11 = arith.mulf %in, %in_3 : f32
%12 = arith.addf %out, %11 : f32
linalg.yield %12 : f32
} -> tensor<257x4x256xf32>
%6 = tensor.empty() : tensor<257x4x256xf32>
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<257x4x256xf32>) -> tensor<257x4x256xf32>
%8 = linalg.generic {indexing_maps = [#map1, #map1, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%5, %2 : tensor<257x4x256xf32>, tensor<257x4x256xf32>) outs(%7 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%11 = arith.addf %in, %in_3 : f32
linalg.yield %11 : f32
} -> tensor<257x4x256xf32>
%expanded = tensor.expand_shape %8 [[0, 1], [2], [3]] output_shape [1, 257, 4, 256] : tensor<257x4x256xf32> into tensor<1x257x4x256xf32>
%9 = hal.tensor.barrier join(%expanded : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%10 = hal.tensor.export %9 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %10 : !hal.buffer_view
}
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
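// [editor's annotation, not compiler output] FoldUnitExtentDimsPass collapsed the unit dimensions (the batch of 1
// and the 1x1 filter window), so the convolution above is now a plain channel contraction,
// out[f, h, w] = sum_c x[c, h, w] * weight[f, c], followed by the broadcast bias add and an expand_shape back to NCHW.
// The short numpy sketch below is an editor-added illustration of that equivalence under the shapes in this dump
// (input 1x128x4x256, weight 257x128x1x1, bias 257); the array names are hypothetical and nothing here is part of the log.
import numpy as np

x = np.random.rand(1, 128, 4, 256).astype(np.float32)   # input, NCHW
w = np.random.rand(257, 128, 1, 1).astype(np.float32)   # 1x1 filter, FCHW
b = np.random.rand(257).astype(np.float32)              # bias

# Original form: conv_2d_nchw_fchw with 1x1 kernel, stride 1, dilation 1, plus bias.
conv = np.einsum('nchw,fcij->nfhw', x, w) + b[None, :, None, None]

# Folded form from the dump: collapse the unit dims (tensor.collapse_shape) and contract over
# the channel dim, i.e. out[f, h, w] = sum_c x[c, h, w] * w[f, c], then add the broadcast bias.
x3 = x.reshape(128, 4, 256)
w2 = w.reshape(257, 128)
matmul = np.einsum('chw,fc->fhw', x3, w2) + b[:, None, None]

# The two computations agree up to float32 rounding.
assert np.allclose(conv, matmul.reshape(1, 257, 4, 256), rtol=1e-4, atol=1e-4)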
// -----// IR Dump After DemoteContractionInputsToBF16Pass (iree-global-opt-demote-contraction-inputs-to-bf16) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After DemoteContractionInputsToBF16Pass (iree-global-opt-demote-contraction-inputs-to-bf16) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<257x4x256xf32>
%2 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%cst_1 : tensor<257xf32>) outs(%1 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<257x4x256xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2], [3]] : tensor<1x128x4x256xf32> into tensor<128x4x256xf32>
%collapsed_2 = tensor.collapse_shape %cst_0 [[0], [1, 2, 3]] : tensor<257x128x1x1xf32> into tensor<257x128xf32>
%3 = tensor.empty() : tensor<257x4x256xf32>
%4 = linalg.fill ins(%cst : f32) outs(%3 : tensor<257x4x256xf32>) -> tensor<257x4x256xf32>
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d3, d1, d2)>, affine_map<(d0, d1, d2, d3) -> (d0, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel", "reduction"]} ins(%collapsed, %collapsed_2 : tensor<128x4x256xf32>, tensor<257x128xf32>) outs(%4 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%11 = arith.mulf %in, %in_3 : f32
%12 = arith.addf %out, %11 : f32
linalg.yield %12 : f32
} -> tensor<257x4x256xf32>
%6 = tensor.empty() : tensor<257x4x256xf32>
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<257x4x256xf32>) -> tensor<257x4x256xf32>
%8 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%5, %2 : tensor<257x4x256xf32>, tensor<257x4x256xf32>) outs(%7 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%11 = arith.addf %in, %in_3 : f32
linalg.yield %11 : f32
} -> tensor<257x4x256xf32>
%expanded = tensor.expand_shape %8 [[0, 1], [2], [3]] output_shape [1, 257, 4, 256] : tensor<257x4x256xf32> into tensor<1x257x4x256xf32>
%9 = hal.tensor.barrier join(%expanded : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%10 = hal.tensor.export %9 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %10 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<257x4x256xf32>
%2 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%cst_1 : tensor<257xf32>) outs(%1 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<257x4x256xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2], [3]] : tensor<1x128x4x256xf32> into tensor<128x4x256xf32>
%collapsed_2 = tensor.collapse_shape %cst_0 [[0], [1, 2, 3]] : tensor<257x128x1x1xf32> into tensor<257x128xf32>
%3 = tensor.empty() : tensor<257x4x256xf32>
%4 = linalg.fill ins(%cst : f32) outs(%3 : tensor<257x4x256xf32>) -> tensor<257x4x256xf32>
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d3, d1, d2)>, affine_map<(d0, d1, d2, d3) -> (d0, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel", "reduction"]} ins(%collapsed, %collapsed_2 : tensor<128x4x256xf32>, tensor<257x128xf32>) outs(%4 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%11 = arith.mulf %in, %in_3 : f32
%12 = arith.addf %out, %11 : f32
linalg.yield %12 : f32
} -> tensor<257x4x256xf32>
%6 = tensor.empty() : tensor<257x4x256xf32>
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<257x4x256xf32>) -> tensor<257x4x256xf32>
%8 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%5, %2 : tensor<257x4x256xf32>, tensor<257x4x256xf32>) outs(%7 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%11 = arith.addf %in, %in_3 : f32
linalg.yield %11 : f32
} -> tensor<257x4x256xf32>
%expanded = tensor.expand_shape %8 [[0, 1], [2], [3]] output_shape [1, 257, 4, 256] : tensor<257x4x256xf32> into tensor<1x257x4x256xf32>
%9 = hal.tensor.barrier join(%expanded : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%10 = hal.tensor.export %9 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %10 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<257x4x256xf32>
%2 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%cst_1 : tensor<257xf32>) outs(%1 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<257x4x256xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2], [3]] : tensor<1x128x4x256xf32> into tensor<128x4x256xf32>
%collapsed_2 = tensor.collapse_shape %cst_0 [[0], [1, 2, 3]] : tensor<257x128x1x1xf32> into tensor<257x128xf32>
%3 = linalg.fill ins(%cst : f32) outs(%1 : tensor<257x4x256xf32>) -> tensor<257x4x256xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d3, d1, d2)>, affine_map<(d0, d1, d2, d3) -> (d0, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel", "reduction"]} ins(%collapsed, %collapsed_2 : tensor<128x4x256xf32>, tensor<257x128xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.mulf %in, %in_3 : f32
%9 = arith.addf %out, %8 : f32
linalg.yield %9 : f32
} -> tensor<257x4x256xf32>
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%4, %2 : tensor<257x4x256xf32>, tensor<257x4x256xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.addf %in, %in_3 : f32
linalg.yield %8 : f32
} -> tensor<257x4x256xf32>
%expanded = tensor.expand_shape %5 [[0, 1], [2], [3]] output_shape [1, 257, 4, 256] : tensor<257x4x256xf32> into tensor<1x257x4x256xf32>
%6 = hal.tensor.barrier join(%expanded : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%7 = hal.tensor.export %6 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
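// [editor's annotation, not compiler output] CSE merged the duplicated tensor.empty and linalg.fill ops: the
// bias broadcast, the contraction, and the bias add now all reuse %1 / %3 instead of separate empties and fills.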
// -----// IR Dump After PropagateLinalgTransposePass (iree-global-opt-propagate-linalg-transpose) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After PropagateLinalgTransposePass (iree-global-opt-propagate-linalg-transpose) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<257x4x256xf32>
%2 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%cst_1 : tensor<257xf32>) outs(%1 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<257x4x256xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2], [3]] : tensor<1x128x4x256xf32> into tensor<128x4x256xf32>
%collapsed_2 = tensor.collapse_shape %cst_0 [[0], [1, 2, 3]] : tensor<257x128x1x1xf32> into tensor<257x128xf32>
%3 = linalg.fill ins(%cst : f32) outs(%1 : tensor<257x4x256xf32>) -> tensor<257x4x256xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d3, d1, d2)>, affine_map<(d0, d1, d2, d3) -> (d0, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel", "reduction"]} ins(%collapsed, %collapsed_2 : tensor<128x4x256xf32>, tensor<257x128xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.mulf %in, %in_3 : f32
%9 = arith.addf %out, %8 : f32
linalg.yield %9 : f32
} -> tensor<257x4x256xf32>
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%4, %2 : tensor<257x4x256xf32>, tensor<257x4x256xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.addf %in, %in_3 : f32
linalg.yield %8 : f32
} -> tensor<257x4x256xf32>
%expanded = tensor.expand_shape %5 [[0, 1], [2], [3]] output_shape [1, 257, 4, 256] : tensor<257x4x256xf32> into tensor<1x257x4x256xf32>
%6 = hal.tensor.barrier join(%expanded : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%7 = hal.tensor.export %6 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<257x4x256xf32>
%2 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%cst_1 : tensor<257xf32>) outs(%1 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<257x4x256xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2], [3]] : tensor<1x128x4x256xf32> into tensor<128x4x256xf32>
%collapsed_2 = tensor.collapse_shape %cst_0 [[0], [1, 2, 3]] : tensor<257x128x1x1xf32> into tensor<257x128xf32>
%3 = linalg.fill ins(%cst : f32) outs(%1 : tensor<257x4x256xf32>) -> tensor<257x4x256xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d3, d1, d2)>, affine_map<(d0, d1, d2, d3) -> (d0, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel", "reduction"]} ins(%collapsed, %collapsed_2 : tensor<128x4x256xf32>, tensor<257x128xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.mulf %in, %in_3 : f32
%9 = arith.addf %out, %8 : f32
linalg.yield %9 : f32
} -> tensor<257x4x256xf32>
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%4, %2 : tensor<257x4x256xf32>, tensor<257x4x256xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.addf %in, %in_3 : f32
linalg.yield %8 : f32
} -> tensor<257x4x256xf32>
%expanded = tensor.expand_shape %5 [[0, 1], [2], [3]] output_shape [1, 257, 4, 256] : tensor<257x4x256xf32> into tensor<1x257x4x256xf32>
%6 = hal.tensor.barrier join(%expanded : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%7 = hal.tensor.export %6 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<257x4x256xf32>
%2 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%cst_1 : tensor<257xf32>) outs(%1 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<257x4x256xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2], [3]] : tensor<1x128x4x256xf32> into tensor<128x4x256xf32>
%collapsed_2 = tensor.collapse_shape %cst_0 [[0], [1, 2, 3]] : tensor<257x128x1x1xf32> into tensor<257x128xf32>
%3 = linalg.fill ins(%cst : f32) outs(%1 : tensor<257x4x256xf32>) -> tensor<257x4x256xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d3, d1, d2)>, affine_map<(d0, d1, d2, d3) -> (d0, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel", "reduction"]} ins(%collapsed, %collapsed_2 : tensor<128x4x256xf32>, tensor<257x128xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.mulf %in, %in_3 : f32
%9 = arith.addf %out, %8 : f32
linalg.yield %9 : f32
} -> tensor<257x4x256xf32>
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%4, %2 : tensor<257x4x256xf32>, tensor<257x4x256xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.addf %in, %in_3 : f32
linalg.yield %8 : f32
} -> tensor<257x4x256xf32>
%expanded = tensor.expand_shape %5 [[0, 1], [2], [3]] output_shape [1, 257, 4, 256] : tensor<257x4x256xf32> into tensor<1x257x4x256xf32>
%6 = hal.tensor.barrier join(%expanded : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%7 = hal.tensor.export %6 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After SetEncodingPass (iree-dispatch-creation-set-encoding) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After SetEncodingPass (iree-dispatch-creation-set-encoding) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<257x4x256xf32>
%2 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%cst_1 : tensor<257xf32>) outs(%1 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<257x4x256xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2], [3]] : tensor<1x128x4x256xf32> into tensor<128x4x256xf32>
%collapsed_2 = tensor.collapse_shape %cst_0 [[0], [1, 2, 3]] : tensor<257x128x1x1xf32> into tensor<257x128xf32>
%3 = linalg.fill ins(%cst : f32) outs(%1 : tensor<257x4x256xf32>) -> tensor<257x4x256xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d3, d1, d2)>, affine_map<(d0, d1, d2, d3) -> (d0, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel", "reduction"]} ins(%collapsed, %collapsed_2 : tensor<128x4x256xf32>, tensor<257x128xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.mulf %in, %in_3 : f32
%9 = arith.addf %out, %8 : f32
linalg.yield %9 : f32
} -> tensor<257x4x256xf32>
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%4, %2 : tensor<257x4x256xf32>, tensor<257x4x256xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.addf %in, %in_3 : f32
linalg.yield %8 : f32
} -> tensor<257x4x256xf32>
%expanded = tensor.expand_shape %5 [[0, 1], [2], [3]] output_shape [1, 257, 4, 256] : tensor<257x4x256xf32> into tensor<1x257x4x256xf32>
%6 = hal.tensor.barrier join(%expanded : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%7 = hal.tensor.export %6 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After MaterializeEncodingIntoNopPass (iree-codegen-materialize-encoding-into-nop) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After MaterializeEncodingIntoNopPass (iree-codegen-materialize-encoding-into-nop) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<257x4x256xf32>
%2 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%cst_1 : tensor<257xf32>) outs(%1 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<257x4x256xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2], [3]] : tensor<1x128x4x256xf32> into tensor<128x4x256xf32>
%collapsed_2 = tensor.collapse_shape %cst_0 [[0], [1, 2, 3]] : tensor<257x128x1x1xf32> into tensor<257x128xf32>
%3 = linalg.fill ins(%cst : f32) outs(%1 : tensor<257x4x256xf32>) -> tensor<257x4x256xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d3, d1, d2)>, affine_map<(d0, d1, d2, d3) -> (d0, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel", "reduction"]} ins(%collapsed, %collapsed_2 : tensor<128x4x256xf32>, tensor<257x128xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.mulf %in, %in_3 : f32
%9 = arith.addf %out, %8 : f32
linalg.yield %9 : f32
} -> tensor<257x4x256xf32>
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%4, %2 : tensor<257x4x256xf32>, tensor<257x4x256xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.addf %in, %in_3 : f32
linalg.yield %8 : f32
} -> tensor<257x4x256xf32>
%expanded = tensor.expand_shape %5 [[0, 1], [2], [3]] output_shape [1, 257, 4, 256] : tensor<257x4x256xf32> into tensor<1x257x4x256xf32>
%6 = hal.tensor.barrier join(%expanded : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%7 = hal.tensor.export %6 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<257x4x256xf32>
%2 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%cst_1 : tensor<257xf32>) outs(%1 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<257x4x256xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2], [3]] : tensor<1x128x4x256xf32> into tensor<128x4x256xf32>
%collapsed_2 = tensor.collapse_shape %cst_0 [[0], [1, 2, 3]] : tensor<257x128x1x1xf32> into tensor<257x128xf32>
%3 = linalg.fill ins(%cst : f32) outs(%1 : tensor<257x4x256xf32>) -> tensor<257x4x256xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d3, d1, d2)>, affine_map<(d0, d1, d2, d3) -> (d0, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel", "reduction"]} ins(%collapsed, %collapsed_2 : tensor<128x4x256xf32>, tensor<257x128xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.mulf %in, %in_3 : f32
%9 = arith.addf %out, %8 : f32
linalg.yield %9 : f32
} -> tensor<257x4x256xf32>
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%4, %2 : tensor<257x4x256xf32>, tensor<257x4x256xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.addf %in, %in_3 : f32
linalg.yield %8 : f32
} -> tensor<257x4x256xf32>
%expanded = tensor.expand_shape %5 [[0, 1], [2], [3]] output_shape [1, 257, 4, 256] : tensor<257x4x256xf32> into tensor<1x257x4x256xf32>
%6 = hal.tensor.barrier join(%expanded : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%7 = hal.tensor.export %6 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After MaterializeHomogeneousEncodingsPass (iree-global-opt-materialize-homogeneous-encodings) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d0)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#map2 = affine_map<(d0, d1, d2, d3) -> (d3, d1, d2)>
#map3 = affine_map<(d0, d1, d2, d3) -> (d0, d3)>
#map4 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<257x4x256xf32>
%2 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%cst_1 : tensor<257xf32>) outs(%1 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<257x4x256xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2], [3]] : tensor<1x128x4x256xf32> into tensor<128x4x256xf32>
%collapsed_2 = tensor.collapse_shape %cst_0 [[0], [1, 2, 3]] : tensor<257x128x1x1xf32> into tensor<257x128xf32>
%3 = linalg.fill ins(%cst : f32) outs(%1 : tensor<257x4x256xf32>) -> tensor<257x4x256xf32>
%4 = linalg.generic {indexing_maps = [#map2, #map3, #map4], iterator_types = ["parallel", "parallel", "parallel", "reduction"]} ins(%collapsed, %collapsed_2 : tensor<128x4x256xf32>, tensor<257x128xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.mulf %in, %in_3 : f32
%9 = arith.addf %out, %8 : f32
linalg.yield %9 : f32
} -> tensor<257x4x256xf32>
%5 = linalg.generic {indexing_maps = [#map1, #map1, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%4, %2 : tensor<257x4x256xf32>, tensor<257x4x256xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.addf %in, %in_3 : f32
linalg.yield %8 : f32
} -> tensor<257x4x256xf32>
%expanded = tensor.expand_shape %5 [[0, 1], [2], [3]] output_shape [1, 257, 4, 256] : tensor<257x4x256xf32> into tensor<1x257x4x256xf32>
%6 = hal.tensor.barrier join(%expanded : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%7 = hal.tensor.export %6 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d0)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#map2 = affine_map<(d0, d1, d2, d3) -> (d3, d1, d2)>
#map3 = affine_map<(d0, d1, d2, d3) -> (d0, d3)>
#map4 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<257x4x256xf32>
%2 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%cst_1 : tensor<257xf32>) outs(%1 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<257x4x256xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2], [3]] : tensor<1x128x4x256xf32> into tensor<128x4x256xf32>
%collapsed_2 = tensor.collapse_shape %cst_0 [[0], [1, 2, 3]] : tensor<257x128x1x1xf32> into tensor<257x128xf32>
%3 = linalg.fill ins(%cst : f32) outs(%1 : tensor<257x4x256xf32>) -> tensor<257x4x256xf32>
%4 = linalg.generic {indexing_maps = [#map2, #map3, #map4], iterator_types = ["parallel", "parallel", "parallel", "reduction"]} ins(%collapsed, %collapsed_2 : tensor<128x4x256xf32>, tensor<257x128xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.mulf %in, %in_3 : f32
%9 = arith.addf %out, %8 : f32
linalg.yield %9 : f32
} -> tensor<257x4x256xf32>
%5 = linalg.generic {indexing_maps = [#map1, #map1, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%4, %2 : tensor<257x4x256xf32>, tensor<257x4x256xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.addf %in, %in_3 : f32
linalg.yield %8 : f32
} -> tensor<257x4x256xf32>
%expanded = tensor.expand_shape %5 [[0, 1], [2], [3]] output_shape [1, 257, 4, 256] : tensor<257x4x256xf32> into tensor<1x257x4x256xf32>
%6 = hal.tensor.barrier join(%expanded : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%7 = hal.tensor.export %6 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
// -----// IR Dump After CSE (cse) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d0)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#map2 = affine_map<(d0, d1, d2, d3) -> (d3, d1, d2)>
#map3 = affine_map<(d0, d1, d2, d3) -> (d0, d3)>
#map4 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<257x4x256xf32>
%2 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%cst_1 : tensor<257xf32>) outs(%1 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<257x4x256xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2], [3]] : tensor<1x128x4x256xf32> into tensor<128x4x256xf32>
%collapsed_2 = tensor.collapse_shape %cst_0 [[0], [1, 2, 3]] : tensor<257x128x1x1xf32> into tensor<257x128xf32>
%3 = linalg.fill ins(%cst : f32) outs(%1 : tensor<257x4x256xf32>) -> tensor<257x4x256xf32>
%4 = linalg.generic {indexing_maps = [#map2, #map3, #map4], iterator_types = ["parallel", "parallel", "parallel", "reduction"]} ins(%collapsed, %collapsed_2 : tensor<128x4x256xf32>, tensor<257x128xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.mulf %in, %in_3 : f32
%9 = arith.addf %out, %8 : f32
linalg.yield %9 : f32
} -> tensor<257x4x256xf32>
%5 = linalg.generic {indexing_maps = [#map1, #map1, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%4, %2 : tensor<257x4x256xf32>, tensor<257x4x256xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.addf %in, %in_3 : f32
linalg.yield %8 : f32
} -> tensor<257x4x256xf32>
%expanded = tensor.expand_shape %5 [[0, 1], [2], [3]] output_shape [1, 257, 4, 256] : tensor<257x4x256xf32> into tensor<1x257x4x256xf32>
%6 = hal.tensor.barrier join(%expanded : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%7 = hal.tensor.export %6 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
// -----// IR Dump After SimplifyPackUnpackPass (iree-global-opt-simplify-pack-unpack) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d0)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#map2 = affine_map<(d0, d1, d2, d3) -> (d3, d1, d2)>
#map3 = affine_map<(d0, d1, d2, d3) -> (d0, d3)>
#map4 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<257x4x256xf32>
%2 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%cst_1 : tensor<257xf32>) outs(%1 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<257x4x256xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2], [3]] : tensor<1x128x4x256xf32> into tensor<128x4x256xf32>
%collapsed_2 = tensor.collapse_shape %cst_0 [[0], [1, 2, 3]] : tensor<257x128x1x1xf32> into tensor<257x128xf32>
%3 = linalg.fill ins(%cst : f32) outs(%1 : tensor<257x4x256xf32>) -> tensor<257x4x256xf32>
%4 = linalg.generic {indexing_maps = [#map2, #map3, #map4], iterator_types = ["parallel", "parallel", "parallel", "reduction"]} ins(%collapsed, %collapsed_2 : tensor<128x4x256xf32>, tensor<257x128xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.mulf %in, %in_3 : f32
%9 = arith.addf %out, %8 : f32
linalg.yield %9 : f32
} -> tensor<257x4x256xf32>
%5 = linalg.generic {indexing_maps = [#map1, #map1, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%4, %2 : tensor<257x4x256xf32>, tensor<257x4x256xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.addf %in, %in_3 : f32
linalg.yield %8 : f32
} -> tensor<257x4x256xf32>
%expanded = tensor.expand_shape %5 [[0, 1], [2], [3]] output_shape [1, 257, 4, 256] : tensor<257x4x256xf32> into tensor<1x257x4x256xf32>
%6 = hal.tensor.barrier join(%expanded : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%7 = hal.tensor.export %6 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
// -----// IR Dump After DataLayoutPropagationPass (iree-global-opt-data-layout-propagation) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After GeneralizeLinalgNamedOpsPass (iree-global-opt-generalize-linalg-named-ops) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After DataLayoutPropagationPass (iree-global-opt-data-layout-propagation) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<257x4x256xf32>
%2 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%cst_1 : tensor<257xf32>) outs(%1 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<257x4x256xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2], [3]] : tensor<1x128x4x256xf32> into tensor<128x4x256xf32>
%collapsed_2 = tensor.collapse_shape %cst_0 [[0], [1, 2, 3]] : tensor<257x128x1x1xf32> into tensor<257x128xf32>
%3 = linalg.fill ins(%cst : f32) outs(%1 : tensor<257x4x256xf32>) -> tensor<257x4x256xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d3, d1, d2)>, affine_map<(d0, d1, d2, d3) -> (d0, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel", "reduction"]} ins(%collapsed, %collapsed_2 : tensor<128x4x256xf32>, tensor<257x128xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.mulf %in, %in_3 : f32
%9 = arith.addf %out, %8 : f32
linalg.yield %9 : f32
} -> tensor<257x4x256xf32>
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%4, %2 : tensor<257x4x256xf32>, tensor<257x4x256xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.addf %in, %in_3 : f32
linalg.yield %8 : f32
} -> tensor<257x4x256xf32>
%expanded = tensor.expand_shape %5 [[0, 1], [2], [3]] output_shape [1, 257, 4, 256] : tensor<257x4x256xf32> into tensor<1x257x4x256xf32>
%6 = hal.tensor.barrier join(%expanded : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%7 = hal.tensor.export %6 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After GlobalLoopInvariantCodeMotionPass (iree-global-opt-loop-invariant-code-motion) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After GeneralizeLinalgNamedOpsPass (iree-global-opt-generalize-linalg-named-ops) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<257x4x256xf32>
%2 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%cst_1 : tensor<257xf32>) outs(%1 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<257x4x256xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2], [3]] : tensor<1x128x4x256xf32> into tensor<128x4x256xf32>
%collapsed_2 = tensor.collapse_shape %cst_0 [[0], [1, 2, 3]] : tensor<257x128x1x1xf32> into tensor<257x128xf32>
%3 = linalg.fill ins(%cst : f32) outs(%1 : tensor<257x4x256xf32>) -> tensor<257x4x256xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d3, d1, d2)>, affine_map<(d0, d1, d2, d3) -> (d0, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel", "reduction"]} ins(%collapsed, %collapsed_2 : tensor<128x4x256xf32>, tensor<257x128xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.mulf %in, %in_3 : f32
%9 = arith.addf %out, %8 : f32
linalg.yield %9 : f32
} -> tensor<257x4x256xf32>
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%4, %2 : tensor<257x4x256xf32>, tensor<257x4x256xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.addf %in, %in_3 : f32
linalg.yield %8 : f32
} -> tensor<257x4x256xf32>
%expanded = tensor.expand_shape %5 [[0, 1], [2], [3]] output_shape [1, 257, 4, 256] : tensor<257x4x256xf32> into tensor<1x257x4x256xf32>
%6 = hal.tensor.barrier join(%expanded : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%7 = hal.tensor.export %6 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After GlobalLoopInvariantCodeMotionPass (iree-global-opt-loop-invariant-code-motion) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<257x4x256xf32>
%2 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%cst_1 : tensor<257xf32>) outs(%1 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<257x4x256xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2], [3]] : tensor<1x128x4x256xf32> into tensor<128x4x256xf32>
%collapsed_2 = tensor.collapse_shape %cst_0 [[0], [1, 2, 3]] : tensor<257x128x1x1xf32> into tensor<257x128xf32>
%3 = linalg.fill ins(%cst : f32) outs(%1 : tensor<257x4x256xf32>) -> tensor<257x4x256xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d3, d1, d2)>, affine_map<(d0, d1, d2, d3) -> (d0, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel", "reduction"]} ins(%collapsed, %collapsed_2 : tensor<128x4x256xf32>, tensor<257x128xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.mulf %in, %in_3 : f32
%9 = arith.addf %out, %8 : f32
linalg.yield %9 : f32
} -> tensor<257x4x256xf32>
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%4, %2 : tensor<257x4x256xf32>, tensor<257x4x256xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.addf %in, %in_3 : f32
linalg.yield %8 : f32
} -> tensor<257x4x256xf32>
%expanded = tensor.expand_shape %5 [[0, 1], [2], [3]] output_shape [1, 257, 4, 256] : tensor<257x4x256xf32> into tensor<1x257x4x256xf32>
%6 = hal.tensor.barrier join(%expanded : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%7 = hal.tensor.export %6 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<257x4x256xf32>
%2 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%cst_1 : tensor<257xf32>) outs(%1 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<257x4x256xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2], [3]] : tensor<1x128x4x256xf32> into tensor<128x4x256xf32>
%collapsed_2 = tensor.collapse_shape %cst_0 [[0], [1, 2, 3]] : tensor<257x128x1x1xf32> into tensor<257x128xf32>
%3 = linalg.fill ins(%cst : f32) outs(%1 : tensor<257x4x256xf32>) -> tensor<257x4x256xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d3, d1, d2)>, affine_map<(d0, d1, d2, d3) -> (d0, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel", "reduction"]} ins(%collapsed, %collapsed_2 : tensor<128x4x256xf32>, tensor<257x128xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.mulf %in, %in_3 : f32
%9 = arith.addf %out, %8 : f32
linalg.yield %9 : f32
} -> tensor<257x4x256xf32>
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%4, %2 : tensor<257x4x256xf32>, tensor<257x4x256xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.addf %in, %in_3 : f32
linalg.yield %8 : f32
} -> tensor<257x4x256xf32>
%expanded = tensor.expand_shape %5 [[0, 1], [2], [3]] output_shape [1, 257, 4, 256] : tensor<257x4x256xf32> into tensor<1x257x4x256xf32>
%6 = hal.tensor.barrier join(%expanded : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%7 = hal.tensor.export %6 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After SimplifyGlobalAccessesPass (iree-util-simplify-global-accesses) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<257x4x256xf32>
%2 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%cst_1 : tensor<257xf32>) outs(%1 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<257x4x256xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2], [3]] : tensor<1x128x4x256xf32> into tensor<128x4x256xf32>
%collapsed_2 = tensor.collapse_shape %cst_0 [[0], [1, 2, 3]] : tensor<257x128x1x1xf32> into tensor<257x128xf32>
%3 = linalg.fill ins(%cst : f32) outs(%1 : tensor<257x4x256xf32>) -> tensor<257x4x256xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d3, d1, d2)>, affine_map<(d0, d1, d2, d3) -> (d0, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel", "reduction"]} ins(%collapsed, %collapsed_2 : tensor<128x4x256xf32>, tensor<257x128xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.mulf %in, %in_3 : f32
%9 = arith.addf %out, %8 : f32
linalg.yield %9 : f32
} -> tensor<257x4x256xf32>
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%4, %2 : tensor<257x4x256xf32>, tensor<257x4x256xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.addf %in, %in_3 : f32
linalg.yield %8 : f32
} -> tensor<257x4x256xf32>
%expanded = tensor.expand_shape %5 [[0, 1], [2], [3]] output_shape [1, 257, 4, 256] : tensor<257x4x256xf32> into tensor<1x257x4x256xf32>
%6 = hal.tensor.barrier join(%expanded : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%7 = hal.tensor.export %6 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatternsPass (iree-util-apply-patterns) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After SimplifyGlobalAccessesPass (iree-util-simplify-global-accesses) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<257x4x256xf32>
%2 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%cst_1 : tensor<257xf32>) outs(%1 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<257x4x256xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2], [3]] : tensor<1x128x4x256xf32> into tensor<128x4x256xf32>
%collapsed_2 = tensor.collapse_shape %cst_0 [[0], [1, 2, 3]] : tensor<257x128x1x1xf32> into tensor<257x128xf32>
%3 = linalg.fill ins(%cst : f32) outs(%1 : tensor<257x4x256xf32>) -> tensor<257x4x256xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d3, d1, d2)>, affine_map<(d0, d1, d2, d3) -> (d0, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel", "reduction"]} ins(%collapsed, %collapsed_2 : tensor<128x4x256xf32>, tensor<257x128xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.mulf %in, %in_3 : f32
%9 = arith.addf %out, %8 : f32
linalg.yield %9 : f32
} -> tensor<257x4x256xf32>
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%4, %2 : tensor<257x4x256xf32>, tensor<257x4x256xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.addf %in, %in_3 : f32
linalg.yield %8 : f32
} -> tensor<257x4x256xf32>
%expanded = tensor.expand_shape %5 [[0, 1], [2], [3]] output_shape [1, 257, 4, 256] : tensor<257x4x256xf32> into tensor<1x257x4x256xf32>
%6 = hal.tensor.barrier join(%expanded : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%7 = hal.tensor.export %6 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatternsPass (iree-util-apply-patterns) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<257x4x256xf32>
%2 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%cst_1 : tensor<257xf32>) outs(%1 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<257x4x256xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2], [3]] : tensor<1x128x4x256xf32> into tensor<128x4x256xf32>
%collapsed_2 = tensor.collapse_shape %cst_0 [[0], [1, 2, 3]] : tensor<257x128x1x1xf32> into tensor<257x128xf32>
%3 = linalg.fill ins(%cst : f32) outs(%1 : tensor<257x4x256xf32>) -> tensor<257x4x256xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d3, d1, d2)>, affine_map<(d0, d1, d2, d3) -> (d0, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel", "reduction"]} ins(%collapsed, %collapsed_2 : tensor<128x4x256xf32>, tensor<257x128xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.mulf %in, %in_3 : f32
%9 = arith.addf %out, %8 : f32
linalg.yield %9 : f32
} -> tensor<257x4x256xf32>
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%4, %2 : tensor<257x4x256xf32>, tensor<257x4x256xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.addf %in, %in_3 : f32
linalg.yield %8 : f32
} -> tensor<257x4x256xf32>
%expanded = tensor.expand_shape %5 [[0, 1], [2], [3]] output_shape [1, 257, 4, 256] : tensor<257x4x256xf32> into tensor<1x257x4x256xf32>
%6 = hal.tensor.barrier join(%expanded : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%7 = hal.tensor.export %6 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After FoldGlobalsPass (iree-util-fold-globals) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d0)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#map2 = affine_map<(d0, d1, d2, d3) -> (d3, d1, d2)>
#map3 = affine_map<(d0, d1, d2, d3) -> (d0, d3)>
#map4 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<257x4x256xf32>
%2 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%cst_1 : tensor<257xf32>) outs(%1 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<257x4x256xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2], [3]] : tensor<1x128x4x256xf32> into tensor<128x4x256xf32>
%collapsed_2 = tensor.collapse_shape %cst_0 [[0], [1, 2, 3]] : tensor<257x128x1x1xf32> into tensor<257x128xf32>
%3 = linalg.fill ins(%cst : f32) outs(%1 : tensor<257x4x256xf32>) -> tensor<257x4x256xf32>
%4 = linalg.generic {indexing_maps = [#map2, #map3, #map4], iterator_types = ["parallel", "parallel", "parallel", "reduction"]} ins(%collapsed, %collapsed_2 : tensor<128x4x256xf32>, tensor<257x128xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.mulf %in, %in_3 : f32
%9 = arith.addf %out, %8 : f32
linalg.yield %9 : f32
} -> tensor<257x4x256xf32>
%5 = linalg.generic {indexing_maps = [#map1, #map1, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%4, %2 : tensor<257x4x256xf32>, tensor<257x4x256xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.addf %in, %in_3 : f32
linalg.yield %8 : f32
} -> tensor<257x4x256xf32>
%expanded = tensor.expand_shape %5 [[0, 1], [2], [3]] output_shape [1, 257, 4, 256] : tensor<257x4x256xf32> into tensor<1x257x4x256xf32>
%6 = hal.tensor.barrier join(%expanded : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%7 = hal.tensor.export %6 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
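// The dialect_resources section is where the weight payloads live; both blobs here are the
// 4-byte placeholder "0x00000000", and the resource names indicate the original ONNX
// constant data was elided on import, so only the tensor shapes (257x128x1x1 and 257) are
// meaningful in these dumps.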
// -----// IR Dump After IPOPass (iree-util-ipo) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d0)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#map2 = affine_map<(d0, d1, d2, d3) -> (d3, d1, d2)>
#map3 = affine_map<(d0, d1, d2, d3) -> (d0, d3)>
#map4 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<257x4x256xf32>
%2 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%cst_1 : tensor<257xf32>) outs(%1 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<257x4x256xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2], [3]] : tensor<1x128x4x256xf32> into tensor<128x4x256xf32>
%collapsed_2 = tensor.collapse_shape %cst_0 [[0], [1, 2, 3]] : tensor<257x128x1x1xf32> into tensor<257x128xf32>
%3 = linalg.fill ins(%cst : f32) outs(%1 : tensor<257x4x256xf32>) -> tensor<257x4x256xf32>
%4 = linalg.generic {indexing_maps = [#map2, #map3, #map4], iterator_types = ["parallel", "parallel", "parallel", "reduction"]} ins(%collapsed, %collapsed_2 : tensor<128x4x256xf32>, tensor<257x128xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.mulf %in, %in_3 : f32
%9 = arith.addf %out, %8 : f32
linalg.yield %9 : f32
} -> tensor<257x4x256xf32>
%5 = linalg.generic {indexing_maps = [#map1, #map1, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%4, %2 : tensor<257x4x256xf32>, tensor<257x4x256xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.addf %in, %in_3 : f32
linalg.yield %8 : f32
} -> tensor<257x4x256xf32>
%expanded = tensor.expand_shape %5 [[0, 1], [2], [3]] output_shape [1, 257, 4, 256] : tensor<257x4x256xf32> into tensor<1x257x4x256xf32>
%6 = hal.tensor.barrier join(%expanded : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%7 = hal.tensor.export %6 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
// -----// IR Dump After OptimizeIntArithmeticPass (iree-util-optimize-int-arithmetic) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After OptimizeIntArithmeticPass (iree-util-optimize-int-arithmetic) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<257x4x256xf32>
%2 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%cst_1 : tensor<257xf32>) outs(%1 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<257x4x256xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2], [3]] : tensor<1x128x4x256xf32> into tensor<128x4x256xf32>
%collapsed_2 = tensor.collapse_shape %cst_0 [[0], [1, 2, 3]] : tensor<257x128x1x1xf32> into tensor<257x128xf32>
%3 = linalg.fill ins(%cst : f32) outs(%1 : tensor<257x4x256xf32>) -> tensor<257x4x256xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d3, d1, d2)>, affine_map<(d0, d1, d2, d3) -> (d0, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel", "reduction"]} ins(%collapsed, %collapsed_2 : tensor<128x4x256xf32>, tensor<257x128xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.mulf %in, %in_3 : f32
%9 = arith.addf %out, %8 : f32
linalg.yield %9 : f32
} -> tensor<257x4x256xf32>
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%4, %2 : tensor<257x4x256xf32>, tensor<257x4x256xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.addf %in, %in_3 : f32
linalg.yield %8 : f32
} -> tensor<257x4x256xf32>
%expanded = tensor.expand_shape %5 [[0, 1], [2], [3]] output_shape [1, 257, 4, 256] : tensor<257x4x256xf32> into tensor<1x257x4x256xf32>
%6 = hal.tensor.barrier join(%expanded : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%7 = hal.tensor.export %6 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<257x4x256xf32>
%2 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%cst_1 : tensor<257xf32>) outs(%1 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<257x4x256xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2], [3]] : tensor<1x128x4x256xf32> into tensor<128x4x256xf32>
%collapsed_2 = tensor.collapse_shape %cst_0 [[0], [1, 2, 3]] : tensor<257x128x1x1xf32> into tensor<257x128xf32>
%3 = linalg.fill ins(%cst : f32) outs(%1 : tensor<257x4x256xf32>) -> tensor<257x4x256xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d3, d1, d2)>, affine_map<(d0, d1, d2, d3) -> (d0, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel", "reduction"]} ins(%collapsed, %collapsed_2 : tensor<128x4x256xf32>, tensor<257x128xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.mulf %in, %in_3 : f32
%9 = arith.addf %out, %8 : f32
linalg.yield %9 : f32
} -> tensor<257x4x256xf32>
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%4, %2 : tensor<257x4x256xf32>, tensor<257x4x256xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.addf %in, %in_3 : f32
linalg.yield %8 : f32
} -> tensor<257x4x256xf32>
%expanded = tensor.expand_shape %5 [[0, 1], [2], [3]] output_shape [1, 257, 4, 256] : tensor<257x4x256xf32> into tensor<1x257x4x256xf32>
%6 = hal.tensor.barrier join(%expanded : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%7 = hal.tensor.export %6 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<257x4x256xf32>
%2 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%cst_1 : tensor<257xf32>) outs(%1 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<257x4x256xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2], [3]] : tensor<1x128x4x256xf32> into tensor<128x4x256xf32>
%collapsed_2 = tensor.collapse_shape %cst_0 [[0], [1, 2, 3]] : tensor<257x128x1x1xf32> into tensor<257x128xf32>
%3 = linalg.fill ins(%cst : f32) outs(%1 : tensor<257x4x256xf32>) -> tensor<257x4x256xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d3, d1, d2)>, affine_map<(d0, d1, d2, d3) -> (d0, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel", "reduction"]} ins(%collapsed, %collapsed_2 : tensor<128x4x256xf32>, tensor<257x128xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.mulf %in, %in_3 : f32
%9 = arith.addf %out, %8 : f32
linalg.yield %9 : f32
} -> tensor<257x4x256xf32>
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%4, %2 : tensor<257x4x256xf32>, tensor<257x4x256xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.addf %in, %in_3 : f32
linalg.yield %8 : f32
} -> tensor<257x4x256xf32>
%expanded = tensor.expand_shape %5 [[0, 1], [2], [3]] output_shape [1, 257, 4, 256] : tensor<257x4x256xf32> into tensor<1x257x4x256xf32>
%6 = hal.tensor.barrier join(%expanded : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%7 = hal.tensor.export %6 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After HoistIntoGlobalsPass (iree-util-hoist-into-globals) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d0)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#map2 = affine_map<(d0, d1, d2, d3) -> (d3, d1, d2)>
#map3 = affine_map<(d0, d1, d2, d3) -> (d0, d3)>
#map4 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<257x4x256xf32>
%2 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%cst_1 : tensor<257xf32>) outs(%1 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<257x4x256xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2], [3]] : tensor<1x128x4x256xf32> into tensor<128x4x256xf32>
%collapsed_2 = tensor.collapse_shape %cst_0 [[0], [1, 2, 3]] : tensor<257x128x1x1xf32> into tensor<257x128xf32>
%3 = linalg.fill ins(%cst : f32) outs(%1 : tensor<257x4x256xf32>) -> tensor<257x4x256xf32>
%4 = linalg.generic {indexing_maps = [#map2, #map3, #map4], iterator_types = ["parallel", "parallel", "parallel", "reduction"]} ins(%collapsed, %collapsed_2 : tensor<128x4x256xf32>, tensor<257x128xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.mulf %in, %in_3 : f32
%9 = arith.addf %out, %8 : f32
linalg.yield %9 : f32
} -> tensor<257x4x256xf32>
%5 = linalg.generic {indexing_maps = [#map1, #map1, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%4, %2 : tensor<257x4x256xf32>, tensor<257x4x256xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.addf %in, %in_3 : f32
linalg.yield %8 : f32
} -> tensor<257x4x256xf32>
%expanded = tensor.expand_shape %5 [[0, 1], [2], [3]] output_shape [1, 257, 4, 256] : tensor<257x4x256xf32> into tensor<1x257x4x256xf32>
%6 = hal.tensor.barrier join(%expanded : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%7 = hal.tensor.export %6 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
// -----// IR Dump After JitGlobalsPass (iree-consteval-jit-globals) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d0)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#map2 = affine_map<(d0, d1, d2, d3) -> (d3, d1, d2)>
#map3 = affine_map<(d0, d1, d2, d3) -> (d0, d3)>
#map4 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<257x4x256xf32>
%2 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%cst_1 : tensor<257xf32>) outs(%1 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<257x4x256xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2], [3]] : tensor<1x128x4x256xf32> into tensor<128x4x256xf32>
%collapsed_2 = tensor.collapse_shape %cst_0 [[0], [1, 2, 3]] : tensor<257x128x1x1xf32> into tensor<257x128xf32>
%3 = linalg.fill ins(%cst : f32) outs(%1 : tensor<257x4x256xf32>) -> tensor<257x4x256xf32>
%4 = linalg.generic {indexing_maps = [#map2, #map3, #map4], iterator_types = ["parallel", "parallel", "parallel", "reduction"]} ins(%collapsed, %collapsed_2 : tensor<128x4x256xf32>, tensor<257x128xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.mulf %in, %in_3 : f32
%9 = arith.addf %out, %8 : f32
linalg.yield %9 : f32
} -> tensor<257x4x256xf32>
%5 = linalg.generic {indexing_maps = [#map1, #map1, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%4, %2 : tensor<257x4x256xf32>, tensor<257x4x256xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.addf %in, %in_3 : f32
linalg.yield %8 : f32
} -> tensor<257x4x256xf32>
%expanded = tensor.expand_shape %5 [[0, 1], [2], [3]] output_shape [1, 257, 4, 256] : tensor<257x4x256xf32> into tensor<1x257x4x256xf32>
%6 = hal.tensor.barrier join(%expanded : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%7 = hal.tensor.export %6 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
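// HoistIntoGlobalsPass found no constant-foldable expressions to hoist into globals (only
// @__device_0 exists), so JitGlobalsPass (consteval) has nothing to evaluate and the module
// is unchanged from the previous dump.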
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<257x4x256xf32>
%2 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%cst_1 : tensor<257xf32>) outs(%1 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<257x4x256xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2], [3]] : tensor<1x128x4x256xf32> into tensor<128x4x256xf32>
%collapsed_2 = tensor.collapse_shape %cst_0 [[0], [1, 2, 3]] : tensor<257x128x1x1xf32> into tensor<257x128xf32>
%3 = linalg.fill ins(%cst : f32) outs(%1 : tensor<257x4x256xf32>) -> tensor<257x4x256xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d3, d1, d2)>, affine_map<(d0, d1, d2, d3) -> (d0, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel", "reduction"]} ins(%collapsed, %collapsed_2 : tensor<128x4x256xf32>, tensor<257x128xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.mulf %in, %in_3 : f32
%9 = arith.addf %out, %8 : f32
linalg.yield %9 : f32
} -> tensor<257x4x256xf32>
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%4, %2 : tensor<257x4x256xf32>, tensor<257x4x256xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.addf %in, %in_3 : f32
linalg.yield %8 : f32
} -> tensor<257x4x256xf32>
%expanded = tensor.expand_shape %5 [[0, 1], [2], [3]] output_shape [1, 257, 4, 256] : tensor<257x4x256xf32> into tensor<1x257x4x256xf32>
%6 = hal.tensor.barrier join(%expanded : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%7 = hal.tensor.export %6 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<257x4x256xf32>
%2 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%cst_1 : tensor<257xf32>) outs(%1 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<257x4x256xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2], [3]] : tensor<1x128x4x256xf32> into tensor<128x4x256xf32>
%collapsed_2 = tensor.collapse_shape %cst_0 [[0], [1, 2, 3]] : tensor<257x128x1x1xf32> into tensor<257x128xf32>
%3 = linalg.fill ins(%cst : f32) outs(%1 : tensor<257x4x256xf32>) -> tensor<257x4x256xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d3, d1, d2)>, affine_map<(d0, d1, d2, d3) -> (d0, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel", "reduction"]} ins(%collapsed, %collapsed_2 : tensor<128x4x256xf32>, tensor<257x128xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.mulf %in, %in_3 : f32
%9 = arith.addf %out, %8 : f32
linalg.yield %9 : f32
} -> tensor<257x4x256xf32>
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%4, %2 : tensor<257x4x256xf32>, tensor<257x4x256xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.addf %in, %in_3 : f32
linalg.yield %8 : f32
} -> tensor<257x4x256xf32>
%expanded = tensor.expand_shape %5 [[0, 1], [2], [3]] output_shape [1, 257, 4, 256] : tensor<257x4x256xf32> into tensor<1x257x4x256xf32>
%6 = hal.tensor.barrier join(%expanded : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%7 = hal.tensor.export %6 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After RaiseSpecialOpsPass (iree-global-opt-raise-special-ops) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After InjectTensorTracingPass (iree-flow-inject-tensor-tracing) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After RaiseSpecialOpsPass (iree-global-opt-raise-special-ops) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<257x4x256xf32>
%2 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%cst_1 : tensor<257xf32>) outs(%1 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<257x4x256xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2], [3]] : tensor<1x128x4x256xf32> into tensor<128x4x256xf32>
%collapsed_2 = tensor.collapse_shape %cst_0 [[0], [1, 2, 3]] : tensor<257x128x1x1xf32> into tensor<257x128xf32>
%3 = linalg.fill ins(%cst : f32) outs(%1 : tensor<257x4x256xf32>) -> tensor<257x4x256xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d3, d1, d2)>, affine_map<(d0, d1, d2, d3) -> (d0, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel", "reduction"]} ins(%collapsed, %collapsed_2 : tensor<128x4x256xf32>, tensor<257x128xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.mulf %in, %in_3 : f32
%9 = arith.addf %out, %8 : f32
linalg.yield %9 : f32
} -> tensor<257x4x256xf32>
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%4, %2 : tensor<257x4x256xf32>, tensor<257x4x256xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.addf %in, %in_3 : f32
linalg.yield %8 : f32
} -> tensor<257x4x256xf32>
%expanded = tensor.expand_shape %5 [[0, 1], [2], [3]] output_shape [1, 257, 4, 256] : tensor<257x4x256xf32> into tensor<1x257x4x256xf32>
%6 = hal.tensor.barrier join(%expanded : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%7 = hal.tensor.export %6 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After InjectTensorTracingPass (iree-flow-inject-tensor-tracing) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<257x4x256xf32>
%2 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%cst_1 : tensor<257xf32>) outs(%1 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<257x4x256xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2], [3]] : tensor<1x128x4x256xf32> into tensor<128x4x256xf32>
%collapsed_2 = tensor.collapse_shape %cst_0 [[0], [1, 2, 3]] : tensor<257x128x1x1xf32> into tensor<257x128xf32>
%3 = linalg.fill ins(%cst : f32) outs(%1 : tensor<257x4x256xf32>) -> tensor<257x4x256xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d3, d1, d2)>, affine_map<(d0, d1, d2, d3) -> (d0, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel", "reduction"]} ins(%collapsed, %collapsed_2 : tensor<128x4x256xf32>, tensor<257x128xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.mulf %in, %in_3 : f32
%9 = arith.addf %out, %8 : f32
linalg.yield %9 : f32
} -> tensor<257x4x256xf32>
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%4, %2 : tensor<257x4x256xf32>, tensor<257x4x256xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.addf %in, %in_3 : f32
linalg.yield %8 : f32
} -> tensor<257x4x256xf32>
%expanded = tensor.expand_shape %5 [[0, 1], [2], [3]] output_shape [1, 257, 4, 256] : tensor<257x4x256xf32> into tensor<1x257x4x256xf32>
%6 = hal.tensor.barrier join(%expanded : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%7 = hal.tensor.export %6 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After TensorPadToTensorInsertSlicePass (iree-dispatch-creation-tensor-pad-to-tensor-insert-slice) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d0)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#map2 = affine_map<(d0, d1, d2, d3) -> (d3, d1, d2)>
#map3 = affine_map<(d0, d1, d2, d3) -> (d0, d3)>
#map4 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<257x4x256xf32>
%2 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%cst_1 : tensor<257xf32>) outs(%1 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<257x4x256xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2], [3]] : tensor<1x128x4x256xf32> into tensor<128x4x256xf32>
%collapsed_2 = tensor.collapse_shape %cst_0 [[0], [1, 2, 3]] : tensor<257x128x1x1xf32> into tensor<257x128xf32>
%3 = linalg.fill ins(%cst : f32) outs(%1 : tensor<257x4x256xf32>) -> tensor<257x4x256xf32>
%4 = linalg.generic {indexing_maps = [#map2, #map3, #map4], iterator_types = ["parallel", "parallel", "parallel", "reduction"]} ins(%collapsed, %collapsed_2 : tensor<128x4x256xf32>, tensor<257x128xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.mulf %in, %in_3 : f32
%9 = arith.addf %out, %8 : f32
linalg.yield %9 : f32
} -> tensor<257x4x256xf32>
%5 = linalg.generic {indexing_maps = [#map1, #map1, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%4, %2 : tensor<257x4x256xf32>, tensor<257x4x256xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.addf %in, %in_3 : f32
linalg.yield %8 : f32
} -> tensor<257x4x256xf32>
%expanded = tensor.expand_shape %5 [[0, 1], [2], [3]] output_shape [1, 257, 4, 256] : tensor<257x4x256xf32> into tensor<1x257x4x256xf32>
%6 = hal.tensor.barrier join(%expanded : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%7 = hal.tensor.export %6 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
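// The function contains no tensor.pad ops, so TensorPadToTensorInsertSlicePass leaves it
// untouched; the canonicalize/CSE dumps that follow are likewise identical.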
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<257x4x256xf32>
%2 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%cst_1 : tensor<257xf32>) outs(%1 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<257x4x256xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2], [3]] : tensor<1x128x4x256xf32> into tensor<128x4x256xf32>
%collapsed_2 = tensor.collapse_shape %cst_0 [[0], [1, 2, 3]] : tensor<257x128x1x1xf32> into tensor<257x128xf32>
%3 = linalg.fill ins(%cst : f32) outs(%1 : tensor<257x4x256xf32>) -> tensor<257x4x256xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d3, d1, d2)>, affine_map<(d0, d1, d2, d3) -> (d0, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel", "reduction"]} ins(%collapsed, %collapsed_2 : tensor<128x4x256xf32>, tensor<257x128xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.mulf %in, %in_3 : f32
%9 = arith.addf %out, %8 : f32
linalg.yield %9 : f32
} -> tensor<257x4x256xf32>
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%4, %2 : tensor<257x4x256xf32>, tensor<257x4x256xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.addf %in, %in_3 : f32
linalg.yield %8 : f32
} -> tensor<257x4x256xf32>
%expanded = tensor.expand_shape %5 [[0, 1], [2], [3]] output_shape [1, 257, 4, 256] : tensor<257x4x256xf32> into tensor<1x257x4x256xf32>
%6 = hal.tensor.barrier join(%expanded : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%7 = hal.tensor.export %6 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<257x4x256xf32>
%2 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%cst_1 : tensor<257xf32>) outs(%1 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<257x4x256xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2], [3]] : tensor<1x128x4x256xf32> into tensor<128x4x256xf32>
%collapsed_2 = tensor.collapse_shape %cst_0 [[0], [1, 2, 3]] : tensor<257x128x1x1xf32> into tensor<257x128xf32>
%3 = linalg.fill ins(%cst : f32) outs(%1 : tensor<257x4x256xf32>) -> tensor<257x4x256xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d3, d1, d2)>, affine_map<(d0, d1, d2, d3) -> (d0, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel", "reduction"]} ins(%collapsed, %collapsed_2 : tensor<128x4x256xf32>, tensor<257x128xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.mulf %in, %in_3 : f32
%9 = arith.addf %out, %8 : f32
linalg.yield %9 : f32
} -> tensor<257x4x256xf32>
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%4, %2 : tensor<257x4x256xf32>, tensor<257x4x256xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.addf %in, %in_3 : f32
linalg.yield %8 : f32
} -> tensor<257x4x256xf32>
%expanded = tensor.expand_shape %5 [[0, 1], [2], [3]] output_shape [1, 257, 4, 256] : tensor<257x4x256xf32> into tensor<1x257x4x256xf32>
%6 = hal.tensor.barrier join(%expanded : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%7 = hal.tensor.export %6 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After SimplifyGlobalAccessesPass (iree-util-simplify-global-accesses) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After SimplifyGlobalAccessesPass (iree-util-simplify-global-accesses) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<257x4x256xf32>
%2 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%cst_1 : tensor<257xf32>) outs(%1 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<257x4x256xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2], [3]] : tensor<1x128x4x256xf32> into tensor<128x4x256xf32>
%collapsed_2 = tensor.collapse_shape %cst_0 [[0], [1, 2, 3]] : tensor<257x128x1x1xf32> into tensor<257x128xf32>
%3 = linalg.fill ins(%cst : f32) outs(%1 : tensor<257x4x256xf32>) -> tensor<257x4x256xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d3, d1, d2)>, affine_map<(d0, d1, d2, d3) -> (d0, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel", "reduction"]} ins(%collapsed, %collapsed_2 : tensor<128x4x256xf32>, tensor<257x128xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.mulf %in, %in_3 : f32
%9 = arith.addf %out, %8 : f32
linalg.yield %9 : f32
} -> tensor<257x4x256xf32>
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%4, %2 : tensor<257x4x256xf32>, tensor<257x4x256xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.addf %in, %in_3 : f32
linalg.yield %8 : f32
} -> tensor<257x4x256xf32>
%expanded = tensor.expand_shape %5 [[0, 1], [2], [3]] output_shape [1, 257, 4, 256] : tensor<257x4x256xf32> into tensor<1x257x4x256xf32>
%6 = hal.tensor.barrier join(%expanded : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%7 = hal.tensor.export %6 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatternsPass (iree-util-apply-patterns) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatternsPass (iree-util-apply-patterns) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<257x4x256xf32>
%2 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%cst_1 : tensor<257xf32>) outs(%1 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<257x4x256xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2], [3]] : tensor<1x128x4x256xf32> into tensor<128x4x256xf32>
%collapsed_2 = tensor.collapse_shape %cst_0 [[0], [1, 2, 3]] : tensor<257x128x1x1xf32> into tensor<257x128xf32>
%3 = linalg.fill ins(%cst : f32) outs(%1 : tensor<257x4x256xf32>) -> tensor<257x4x256xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d3, d1, d2)>, affine_map<(d0, d1, d2, d3) -> (d0, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel", "reduction"]} ins(%collapsed, %collapsed_2 : tensor<128x4x256xf32>, tensor<257x128xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.mulf %in, %in_3 : f32
%9 = arith.addf %out, %8 : f32
linalg.yield %9 : f32
} -> tensor<257x4x256xf32>
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%4, %2 : tensor<257x4x256xf32>, tensor<257x4x256xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.addf %in, %in_3 : f32
linalg.yield %8 : f32
} -> tensor<257x4x256xf32>
%expanded = tensor.expand_shape %5 [[0, 1], [2], [3]] output_shape [1, 257, 4, 256] : tensor<257x4x256xf32> into tensor<1x257x4x256xf32>
%6 = hal.tensor.barrier join(%expanded : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%7 = hal.tensor.export %6 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After FoldGlobalsPass (iree-util-fold-globals) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d0)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#map2 = affine_map<(d0, d1, d2, d3) -> (d3, d1, d2)>
#map3 = affine_map<(d0, d1, d2, d3) -> (d0, d3)>
#map4 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<257x4x256xf32>
%2 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%cst_1 : tensor<257xf32>) outs(%1 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<257x4x256xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2], [3]] : tensor<1x128x4x256xf32> into tensor<128x4x256xf32>
%collapsed_2 = tensor.collapse_shape %cst_0 [[0], [1, 2, 3]] : tensor<257x128x1x1xf32> into tensor<257x128xf32>
%3 = linalg.fill ins(%cst : f32) outs(%1 : tensor<257x4x256xf32>) -> tensor<257x4x256xf32>
%4 = linalg.generic {indexing_maps = [#map2, #map3, #map4], iterator_types = ["parallel", "parallel", "parallel", "reduction"]} ins(%collapsed, %collapsed_2 : tensor<128x4x256xf32>, tensor<257x128xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.mulf %in, %in_3 : f32
%9 = arith.addf %out, %8 : f32
linalg.yield %9 : f32
} -> tensor<257x4x256xf32>
%5 = linalg.generic {indexing_maps = [#map1, #map1, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%4, %2 : tensor<257x4x256xf32>, tensor<257x4x256xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.addf %in, %in_3 : f32
linalg.yield %8 : f32
} -> tensor<257x4x256xf32>
%expanded = tensor.expand_shape %5 [[0, 1], [2], [3]] output_shape [1, 257, 4, 256] : tensor<257x4x256xf32> into tensor<1x257x4x256xf32>
%6 = hal.tensor.barrier join(%expanded : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%7 = hal.tensor.export %6 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
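The dialect_resources footer above lists both ONNX constants as the 4-byte blob "0x00000000"; as the resource keys themselves state, the constants were not found (likely elided), so the weight and bias carry no real data in this dump. A quick size check (illustrative Python, not part of the dump) shows the blobs are far too small to back the declared tensors:

elided = bytes.fromhex("00000000")
print(len(elided))            # 4 bytes in the placeholder blob
print(257 * 128 * 1 * 1 * 4)  # 131584 bytes needed for the 257x128x1x1 f32 weight
print(257 * 4)                # 1028 bytes needed for the 257-element f32 bias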
// -----// IR Dump After FuseGlobalsPass (iree-util-fuse-globals) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d0)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#map2 = affine_map<(d0, d1, d2, d3) -> (d3, d1, d2)>
#map3 = affine_map<(d0, d1, d2, d3) -> (d0, d3)>
#map4 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<257x4x256xf32>
%2 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%cst_1 : tensor<257xf32>) outs(%1 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<257x4x256xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2], [3]] : tensor<1x128x4x256xf32> into tensor<128x4x256xf32>
%collapsed_2 = tensor.collapse_shape %cst_0 [[0], [1, 2, 3]] : tensor<257x128x1x1xf32> into tensor<257x128xf32>
%3 = linalg.fill ins(%cst : f32) outs(%1 : tensor<257x4x256xf32>) -> tensor<257x4x256xf32>
%4 = linalg.generic {indexing_maps = [#map2, #map3, #map4], iterator_types = ["parallel", "parallel", "parallel", "reduction"]} ins(%collapsed, %collapsed_2 : tensor<128x4x256xf32>, tensor<257x128xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.mulf %in, %in_3 : f32
%9 = arith.addf %out, %8 : f32
linalg.yield %9 : f32
} -> tensor<257x4x256xf32>
%5 = linalg.generic {indexing_maps = [#map1, #map1, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%4, %2 : tensor<257x4x256xf32>, tensor<257x4x256xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.addf %in, %in_3 : f32
linalg.yield %8 : f32
} -> tensor<257x4x256xf32>
%expanded = tensor.expand_shape %5 [[0, 1], [2], [3]] output_shape [1, 257, 4, 256] : tensor<257x4x256xf32> into tensor<1x257x4x256xf32>
%6 = hal.tensor.barrier join(%expanded : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%7 = hal.tensor.export %6 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
// -----// IR Dump After IPOPass (iree-util-ipo) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d0)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#map2 = affine_map<(d0, d1, d2, d3) -> (d3, d1, d2)>
#map3 = affine_map<(d0, d1, d2, d3) -> (d0, d3)>
#map4 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<257x4x256xf32>
%2 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%cst_1 : tensor<257xf32>) outs(%1 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<257x4x256xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2], [3]] : tensor<1x128x4x256xf32> into tensor<128x4x256xf32>
%collapsed_2 = tensor.collapse_shape %cst_0 [[0], [1, 2, 3]] : tensor<257x128x1x1xf32> into tensor<257x128xf32>
%3 = linalg.fill ins(%cst : f32) outs(%1 : tensor<257x4x256xf32>) -> tensor<257x4x256xf32>
%4 = linalg.generic {indexing_maps = [#map2, #map3, #map4], iterator_types = ["parallel", "parallel", "parallel", "reduction"]} ins(%collapsed, %collapsed_2 : tensor<128x4x256xf32>, tensor<257x128xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.mulf %in, %in_3 : f32
%9 = arith.addf %out, %8 : f32
linalg.yield %9 : f32
} -> tensor<257x4x256xf32>
%5 = linalg.generic {indexing_maps = [#map1, #map1, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%4, %2 : tensor<257x4x256xf32>, tensor<257x4x256xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.addf %in, %in_3 : f32
linalg.yield %8 : f32
} -> tensor<257x4x256xf32>
%expanded = tensor.expand_shape %5 [[0, 1], [2], [3]] output_shape [1, 257, 4, 256] : tensor<257x4x256xf32> into tensor<1x257x4x256xf32>
%6 = hal.tensor.barrier join(%expanded : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%7 = hal.tensor.export %6 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
// -----// IR Dump After FixedPointIteratorPass (iree-util-fixed-point-iterator) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d0)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#map2 = affine_map<(d0, d1, d2, d3) -> (d3, d1, d2)>
#map3 = affine_map<(d0, d1, d2, d3) -> (d0, d3)>
#map4 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<257x4x256xf32>
%2 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%cst_1 : tensor<257xf32>) outs(%1 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<257x4x256xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2], [3]] : tensor<1x128x4x256xf32> into tensor<128x4x256xf32>
%collapsed_2 = tensor.collapse_shape %cst_0 [[0], [1, 2, 3]] : tensor<257x128x1x1xf32> into tensor<257x128xf32>
%3 = linalg.fill ins(%cst : f32) outs(%1 : tensor<257x4x256xf32>) -> tensor<257x4x256xf32>
%4 = linalg.generic {indexing_maps = [#map2, #map3, #map4], iterator_types = ["parallel", "parallel", "parallel", "reduction"]} ins(%collapsed, %collapsed_2 : tensor<128x4x256xf32>, tensor<257x128xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.mulf %in, %in_3 : f32
%9 = arith.addf %out, %8 : f32
linalg.yield %9 : f32
} -> tensor<257x4x256xf32>
%5 = linalg.generic {indexing_maps = [#map1, #map1, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%4, %2 : tensor<257x4x256xf32>, tensor<257x4x256xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.addf %in, %in_3 : f32
linalg.yield %8 : f32
} -> tensor<257x4x256xf32>
%expanded = tensor.expand_shape %5 [[0, 1], [2], [3]] output_shape [1, 257, 4, 256] : tensor<257x4x256xf32> into tensor<1x257x4x256xf32>
%6 = hal.tensor.barrier join(%expanded : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%7 = hal.tensor.export %6 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
// -----// IR Dump After FusionPreprocessingPass (iree-dispatch-creation-fusion-preprocessing) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After FusionPreprocessingPass (iree-dispatch-creation-fusion-preprocessing) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<257x4x256xf32>
%2 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%cst_1 : tensor<257xf32>) outs(%1 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<257x4x256xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2], [3]] : tensor<1x128x4x256xf32> into tensor<128x4x256xf32>
%collapsed_2 = tensor.collapse_shape %cst_0 [[0], [1, 2, 3]] : tensor<257x128x1x1xf32> into tensor<257x128xf32>
%3 = linalg.fill ins(%cst : f32) outs(%1 : tensor<257x4x256xf32>) -> tensor<257x4x256xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d3, d1, d2)>, affine_map<(d0, d1, d2, d3) -> (d0, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel", "reduction"]} ins(%collapsed, %collapsed_2 : tensor<128x4x256xf32>, tensor<257x128xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.mulf %in, %in_3 : f32
%9 = arith.addf %out, %8 : f32
linalg.yield %9 : f32
} -> tensor<257x4x256xf32>
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%4, %2 : tensor<257x4x256xf32>, tensor<257x4x256xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.addf %in, %in_3 : f32
linalg.yield %8 : f32
} -> tensor<257x4x256xf32>
%expanded = tensor.expand_shape %5 [[0, 1], [2], [3]] output_shape [1, 257, 4, 256] : tensor<257x4x256xf32> into tensor<1x257x4x256xf32>
%6 = hal.tensor.barrier join(%expanded : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%7 = hal.tensor.export %6 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<257x4x256xf32>
%2 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%cst_1 : tensor<257xf32>) outs(%1 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<257x4x256xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2], [3]] : tensor<1x128x4x256xf32> into tensor<128x4x256xf32>
%collapsed_2 = tensor.collapse_shape %cst_0 [[0], [1, 2, 3]] : tensor<257x128x1x1xf32> into tensor<257x128xf32>
%3 = linalg.fill ins(%cst : f32) outs(%1 : tensor<257x4x256xf32>) -> tensor<257x4x256xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d3, d1, d2)>, affine_map<(d0, d1, d2, d3) -> (d0, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel", "reduction"]} ins(%collapsed, %collapsed_2 : tensor<128x4x256xf32>, tensor<257x128xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.mulf %in, %in_3 : f32
%9 = arith.addf %out, %8 : f32
linalg.yield %9 : f32
} -> tensor<257x4x256xf32>
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%4, %2 : tensor<257x4x256xf32>, tensor<257x4x256xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.addf %in, %in_3 : f32
linalg.yield %8 : f32
} -> tensor<257x4x256xf32>
%expanded = tensor.expand_shape %5 [[0, 1], [2], [3]] output_shape [1, 257, 4, 256] : tensor<257x4x256xf32> into tensor<1x257x4x256xf32>
%6 = hal.tensor.barrier join(%expanded : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%7 = hal.tensor.export %6 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After ElementwiseOpFusionPass (iree-dispatch-creation-elementwise-op-fusion) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<257x4x256xf32>
%2 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%cst_1 : tensor<257xf32>) outs(%1 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<257x4x256xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2], [3]] : tensor<1x128x4x256xf32> into tensor<128x4x256xf32>
%collapsed_2 = tensor.collapse_shape %cst_0 [[0], [1, 2, 3]] : tensor<257x128x1x1xf32> into tensor<257x128xf32>
%3 = linalg.fill ins(%cst : f32) outs(%1 : tensor<257x4x256xf32>) -> tensor<257x4x256xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d3, d1, d2)>, affine_map<(d0, d1, d2, d3) -> (d0, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel", "reduction"]} ins(%collapsed, %collapsed_2 : tensor<128x4x256xf32>, tensor<257x128xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.mulf %in, %in_3 : f32
%9 = arith.addf %out, %8 : f32
linalg.yield %9 : f32
} -> tensor<257x4x256xf32>
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%4, %2 : tensor<257x4x256xf32>, tensor<257x4x256xf32>) outs(%3 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.addf %in, %in_3 : f32
linalg.yield %8 : f32
} -> tensor<257x4x256xf32>
%expanded = tensor.expand_shape %5 [[0, 1], [2], [3]] output_shape [1, 257, 4, 256] : tensor<257x4x256xf32> into tensor<1x257x4x256xf32>
%6 = hal.tensor.barrier join(%expanded : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%7 = hal.tensor.export %6 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After ElementwiseOpFusionPass (iree-dispatch-creation-elementwise-op-fusion) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<257x4x256xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2], [3]] : tensor<1x128x4x256xf32> into tensor<128x4x256xf32>
%collapsed_2 = tensor.collapse_shape %cst_0 [[0], [1, 2, 3]] : tensor<257x128x1x1xf32> into tensor<257x128xf32>
%2 = linalg.fill ins(%cst : f32) outs(%1 : tensor<257x4x256xf32>) -> tensor<257x4x256xf32>
%3 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d3, d1, d2)>, affine_map<(d0, d1, d2, d3) -> (d0, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel", "reduction"]} ins(%collapsed, %collapsed_2 : tensor<128x4x256xf32>, tensor<257x128xf32>) outs(%2 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.mulf %in, %in_3 : f32
%9 = arith.addf %out, %8 : f32
linalg.yield %9 : f32
} -> tensor<257x4x256xf32>
%4 = tensor.empty() : tensor<257x4x256xf32>
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3, %cst_1 : tensor<257x4x256xf32>, tensor<257xf32>) outs(%4 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.addf %in, %in_3 : f32
linalg.yield %8 : f32
} -> tensor<257x4x256xf32>
%expanded = tensor.expand_shape %5 [[0, 1], [2], [3]] output_shape [1, 257, 4, 256] : tensor<257x4x256xf32> into tensor<1x257x4x256xf32>
%6 = hal.tensor.barrier join(%expanded : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%7 = hal.tensor.export %6 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
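Compared with the preceding dump, elementwise fusion has removed the standalone broadcast generic: the bias add now reads the 1-D 257-element constant directly through the (d0, d1, d2) -> (d0) map instead of consuming a pre-broadcast 257x4x256 tensor. A small NumPy sketch (illustrative only) of why the two forms compute the same values:

import numpy as np

acc = np.random.rand(257, 4, 256).astype(np.float32)  # the contraction result (%3)
b = np.random.rand(257).astype(np.float32)            # %cst_1

before = acc + np.broadcast_to(b[:, None, None], acc.shape)  # materialized broadcast (old %2)
after = acc + b[:, None, None]                                # fused form, no intermediate tensor
assert np.array_equal(before, after)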
// -----// IR Dump After BubbleUpExpandShapesPass (iree-dispatch-creation-bubble-up-expand-shapes) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<257x4x256xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2], [3]] : tensor<1x128x4x256xf32> into tensor<128x4x256xf32>
%collapsed_2 = tensor.collapse_shape %cst_0 [[0], [1, 2, 3]] : tensor<257x128x1x1xf32> into tensor<257x128xf32>
%2 = linalg.fill ins(%cst : f32) outs(%1 : tensor<257x4x256xf32>) -> tensor<257x4x256xf32>
%3 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d3, d1, d2)>, affine_map<(d0, d1, d2, d3) -> (d0, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel", "reduction"]} ins(%collapsed, %collapsed_2 : tensor<128x4x256xf32>, tensor<257x128xf32>) outs(%2 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.mulf %in, %in_3 : f32
%9 = arith.addf %out, %8 : f32
linalg.yield %9 : f32
} -> tensor<257x4x256xf32>
%4 = tensor.empty() : tensor<257x4x256xf32>
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3, %cst_1 : tensor<257x4x256xf32>, tensor<257xf32>) outs(%4 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.addf %in, %in_3 : f32
linalg.yield %8 : f32
} -> tensor<257x4x256xf32>
%expanded = tensor.expand_shape %5 [[0, 1], [2], [3]] output_shape [1, 257, 4, 256] : tensor<257x4x256xf32> into tensor<1x257x4x256xf32>
%6 = hal.tensor.barrier join(%expanded : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%7 = hal.tensor.export %6 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After BubbleUpExtractSlicesPass (iree-dispatch-creation-bubble-up-extract-slices) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<257x4x256xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2], [3]] : tensor<1x128x4x256xf32> into tensor<128x4x256xf32>
%collapsed_2 = tensor.collapse_shape %cst_0 [[0], [1, 2, 3]] : tensor<257x128x1x1xf32> into tensor<257x128xf32>
%2 = linalg.fill ins(%cst : f32) outs(%1 : tensor<257x4x256xf32>) -> tensor<257x4x256xf32>
%3 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d3, d1, d2)>, affine_map<(d0, d1, d2, d3) -> (d0, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel", "reduction"]} ins(%collapsed, %collapsed_2 : tensor<128x4x256xf32>, tensor<257x128xf32>) outs(%2 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%7 = arith.mulf %in, %in_3 : f32
%8 = arith.addf %out, %7 : f32
linalg.yield %8 : f32
} -> tensor<257x4x256xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3, %cst_1 : tensor<257x4x256xf32>, tensor<257xf32>) outs(%1 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%7 = arith.addf %in, %in_3 : f32
linalg.yield %7 : f32
} -> tensor<257x4x256xf32>
%expanded = tensor.expand_shape %4 [[0, 1], [2], [3]] output_shape [1, 257, 4, 256] : tensor<257x4x256xf32> into tensor<1x257x4x256xf32>
%5 = hal.tensor.barrier join(%expanded : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%6 = hal.tensor.export %5 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After BubbleUpExpandShapesPass (iree-dispatch-creation-bubble-up-expand-shapes) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<257x4x256xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2], [3]] : tensor<1x128x4x256xf32> into tensor<128x4x256xf32>
%collapsed_2 = tensor.collapse_shape %cst_0 [[0], [1, 2, 3]] : tensor<257x128x1x1xf32> into tensor<257x128xf32>
%2 = linalg.fill ins(%cst : f32) outs(%1 : tensor<257x4x256xf32>) -> tensor<257x4x256xf32>
%3 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d3, d1, d2)>, affine_map<(d0, d1, d2, d3) -> (d0, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel", "reduction"]} ins(%collapsed, %collapsed_2 : tensor<128x4x256xf32>, tensor<257x128xf32>) outs(%2 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_4: f32, %out: f32):
%8 = arith.mulf %in, %in_4 : f32
%9 = arith.addf %out, %8 : f32
linalg.yield %9 : f32
} -> tensor<257x4x256xf32>
%expanded = tensor.expand_shape %3 [[0, 1], [2], [3]] output_shape [1, 257, 4, 256] : tensor<257x4x256xf32> into tensor<1x257x4x256xf32>
%expanded_3 = tensor.expand_shape %cst_1 [[0, 1]] output_shape [1, 257] : tensor<257xf32> into tensor<1x257xf32>
%4 = tensor.empty() : tensor<1x257x4x256xf32>
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%expanded, %expanded_3 : tensor<1x257x4x256xf32>, tensor<1x257xf32>) outs(%4 : tensor<1x257x4x256xf32>) {
^bb0(%in: f32, %in_4: f32, %out: f32):
%8 = arith.addf %in, %in_4 : f32
linalg.yield %8 : f32
} -> tensor<1x257x4x256xf32>
%6 = hal.tensor.barrier join(%5 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%7 = hal.tensor.export %6 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
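Here the tensor.expand_shape has been bubbled above the bias add: the contraction result is expanded to 1x257x4x256 first, the bias to 1x257, and the elementwise add now runs in the 4-D shape, so the barrier and export consume %5 directly. Reshaping before or after the add yields identical values (illustrative NumPy sketch, hypothetical names):

import numpy as np

acc = np.random.rand(257, 4, 256).astype(np.float32)
b = np.random.rand(257).astype(np.float32)

reshape_last = (acc + b[:, None, None]).reshape(1, 257, 4, 256)               # add, then expand
reshape_first = acc.reshape(1, 257, 4, 256) + b.reshape(1, 257)[:, :, None, None]  # expand, then add
assert np.array_equal(reshape_last, reshape_first)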
// -----// IR Dump After ElementwiseOpFusionPass (iree-dispatch-creation-elementwise-op-fusion) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After BubbleUpExtractSlicesPass (iree-dispatch-creation-bubble-up-extract-slices) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<257x4x256xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2], [3]] : tensor<1x128x4x256xf32> into tensor<128x4x256xf32>
%collapsed_2 = tensor.collapse_shape %cst_0 [[0], [1, 2, 3]] : tensor<257x128x1x1xf32> into tensor<257x128xf32>
%2 = linalg.fill ins(%cst : f32) outs(%1 : tensor<257x4x256xf32>) -> tensor<257x4x256xf32>
%3 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d3, d1, d2)>, affine_map<(d0, d1, d2, d3) -> (d0, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel", "reduction"]} ins(%collapsed, %collapsed_2 : tensor<128x4x256xf32>, tensor<257x128xf32>) outs(%2 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_4: f32, %out: f32):
%8 = arith.mulf %in, %in_4 : f32
%9 = arith.addf %out, %8 : f32
linalg.yield %9 : f32
} -> tensor<257x4x256xf32>
%expanded = tensor.expand_shape %3 [[0, 1], [2], [3]] output_shape [1, 257, 4, 256] : tensor<257x4x256xf32> into tensor<1x257x4x256xf32>
%expanded_3 = tensor.expand_shape %cst_1 [[0, 1]] output_shape [1, 257] : tensor<257xf32> into tensor<1x257xf32>
%4 = tensor.empty() : tensor<1x257x4x256xf32>
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%expanded, %expanded_3 : tensor<1x257x4x256xf32>, tensor<1x257xf32>) outs(%4 : tensor<1x257x4x256xf32>) {
^bb0(%in: f32, %in_4: f32, %out: f32):
%8 = arith.addf %in, %in_4 : f32
linalg.yield %8 : f32
} -> tensor<1x257x4x256xf32>
%6 = hal.tensor.barrier join(%5 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%7 = hal.tensor.export %6 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<257x4x256xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2], [3]] : tensor<1x128x4x256xf32> into tensor<128x4x256xf32>
%collapsed_2 = tensor.collapse_shape %cst_0 [[0], [1, 2, 3]] : tensor<257x128x1x1xf32> into tensor<257x128xf32>
%2 = linalg.fill ins(%cst : f32) outs(%1 : tensor<257x4x256xf32>) -> tensor<257x4x256xf32>
%3 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d3, d1, d2)>, affine_map<(d0, d1, d2, d3) -> (d0, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel", "reduction"]} ins(%collapsed, %collapsed_2 : tensor<128x4x256xf32>, tensor<257x128xf32>) outs(%2 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_4: f32, %out: f32):
%8 = arith.mulf %in, %in_4 : f32
%9 = arith.addf %out, %8 : f32
linalg.yield %9 : f32
} -> tensor<257x4x256xf32>
%expanded = tensor.expand_shape %3 [[0, 1], [2], [3]] output_shape [1, 257, 4, 256] : tensor<257x4x256xf32> into tensor<1x257x4x256xf32>
%expanded_3 = tensor.expand_shape %cst_1 [[0, 1]] output_shape [1, 257] : tensor<257xf32> into tensor<1x257xf32>
%4 = tensor.empty() : tensor<1x257x4x256xf32>
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%expanded, %expanded_3 : tensor<1x257x4x256xf32>, tensor<1x257xf32>) outs(%4 : tensor<1x257x4x256xf32>) {
^bb0(%in: f32, %in_4: f32, %out: f32):
%8 = arith.addf %in, %in_4 : f32
linalg.yield %8 : f32
} -> tensor<1x257x4x256xf32>
%6 = hal.tensor.barrier join(%5 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%7 = hal.tensor.export %6 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After SinkReshapesPass (iree-dispatch-creation-sink-reshapes) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<257x4x256xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2], [3]] : tensor<1x128x4x256xf32> into tensor<128x4x256xf32>
%collapsed_2 = tensor.collapse_shape %cst_0 [[0], [1, 2, 3]] : tensor<257x128x1x1xf32> into tensor<257x128xf32>
%2 = linalg.fill ins(%cst : f32) outs(%1 : tensor<257x4x256xf32>) -> tensor<257x4x256xf32>
%3 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d3, d1, d2)>, affine_map<(d0, d1, d2, d3) -> (d0, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel", "reduction"]} ins(%collapsed, %collapsed_2 : tensor<128x4x256xf32>, tensor<257x128xf32>) outs(%2 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_4: f32, %out: f32):
%8 = arith.mulf %in, %in_4 : f32
%9 = arith.addf %out, %8 : f32
linalg.yield %9 : f32
} -> tensor<257x4x256xf32>
%expanded = tensor.expand_shape %3 [[0, 1], [2], [3]] output_shape [1, 257, 4, 256] : tensor<257x4x256xf32> into tensor<1x257x4x256xf32>
%expanded_3 = tensor.expand_shape %cst_1 [[0, 1]] output_shape [1, 257] : tensor<257xf32> into tensor<1x257xf32>
%4 = tensor.empty() : tensor<1x257x4x256xf32>
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%expanded, %expanded_3 : tensor<1x257x4x256xf32>, tensor<1x257xf32>) outs(%4 : tensor<1x257x4x256xf32>) {
^bb0(%in: f32, %in_4: f32, %out: f32):
%8 = arith.addf %in, %in_4 : f32
linalg.yield %8 : f32
} -> tensor<1x257x4x256xf32>
%6 = hal.tensor.barrier join(%5 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%7 = hal.tensor.export %6 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After FuseMultiUseElementwiseProducerPass (iree-dispatch-creation-fuse-multi-use-elementwise-producer) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After ElementwiseOpFusionPass (iree-dispatch-creation-elementwise-op-fusion) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<257x4x256xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2], [3]] : tensor<1x128x4x256xf32> into tensor<128x4x256xf32>
%collapsed_2 = tensor.collapse_shape %cst_0 [[0], [1, 2, 3]] : tensor<257x128x1x1xf32> into tensor<257x128xf32>
%2 = linalg.fill ins(%cst : f32) outs(%1 : tensor<257x4x256xf32>) -> tensor<257x4x256xf32>
%3 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d3, d1, d2)>, affine_map<(d0, d1, d2, d3) -> (d0, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel", "reduction"]} ins(%collapsed, %collapsed_2 : tensor<128x4x256xf32>, tensor<257x128xf32>) outs(%2 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_4: f32, %out: f32):
%8 = arith.mulf %in, %in_4 : f32
%9 = arith.addf %out, %8 : f32
linalg.yield %9 : f32
} -> tensor<257x4x256xf32>
%expanded = tensor.expand_shape %3 [[0, 1], [2], [3]] output_shape [1, 257, 4, 256] : tensor<257x4x256xf32> into tensor<1x257x4x256xf32>
%expanded_3 = tensor.expand_shape %cst_1 [[0, 1]] output_shape [1, 257] : tensor<257xf32> into tensor<1x257xf32>
%4 = tensor.empty() : tensor<1x257x4x256xf32>
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%expanded, %expanded_3 : tensor<1x257x4x256xf32>, tensor<1x257xf32>) outs(%4 : tensor<1x257x4x256xf32>) {
^bb0(%in: f32, %in_4: f32, %out: f32):
%8 = arith.addf %in, %in_4 : f32
linalg.yield %8 : f32
} -> tensor<1x257x4x256xf32>
%6 = hal.tensor.barrier join(%5 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%7 = hal.tensor.export %6 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<257x4x256xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2], [3]] : tensor<1x128x4x256xf32> into tensor<128x4x256xf32>
%collapsed_2 = tensor.collapse_shape %cst_0 [[0], [1, 2, 3]] : tensor<257x128x1x1xf32> into tensor<257x128xf32>
%2 = linalg.fill ins(%cst : f32) outs(%1 : tensor<257x4x256xf32>) -> tensor<257x4x256xf32>
%3 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d3, d1, d2)>, affine_map<(d0, d1, d2, d3) -> (d0, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel", "reduction"]} ins(%collapsed, %collapsed_2 : tensor<128x4x256xf32>, tensor<257x128xf32>) outs(%2 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_4: f32, %out: f32):
%8 = arith.mulf %in, %in_4 : f32
%9 = arith.addf %out, %8 : f32
linalg.yield %9 : f32
} -> tensor<257x4x256xf32>
%expanded = tensor.expand_shape %3 [[0, 1], [2], [3]] output_shape [1, 257, 4, 256] : tensor<257x4x256xf32> into tensor<1x257x4x256xf32>
%expanded_3 = tensor.expand_shape %cst_1 [[0, 1]] output_shape [1, 257] : tensor<257xf32> into tensor<1x257xf32>
%4 = tensor.empty() : tensor<1x257x4x256xf32>
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%expanded, %expanded_3 : tensor<1x257x4x256xf32>, tensor<1x257xf32>) outs(%4 : tensor<1x257x4x256xf32>) {
^bb0(%in: f32, %in_4: f32, %out: f32):
%8 = arith.addf %in, %in_4 : f32
linalg.yield %8 : f32
} -> tensor<1x257x4x256xf32>
%6 = hal.tensor.barrier join(%5 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%7 = hal.tensor.export %6 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After SplitReductionPass (iree-dispatch-creation-split-reduction-ops) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<257x4x256xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2], [3]] : tensor<1x128x4x256xf32> into tensor<128x4x256xf32>
%collapsed_2 = tensor.collapse_shape %cst_0 [[0], [1, 2, 3]] : tensor<257x128x1x1xf32> into tensor<257x128xf32>
%2 = linalg.fill ins(%cst : f32) outs(%1 : tensor<257x4x256xf32>) -> tensor<257x4x256xf32>
%3 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d3, d1, d2)>, affine_map<(d0, d1, d2, d3) -> (d0, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel", "reduction"]} ins(%collapsed, %collapsed_2 : tensor<128x4x256xf32>, tensor<257x128xf32>) outs(%2 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_4: f32, %out: f32):
%8 = arith.mulf %in, %in_4 : f32
%9 = arith.addf %out, %8 : f32
linalg.yield %9 : f32
} -> tensor<257x4x256xf32>
%expanded = tensor.expand_shape %3 [[0, 1], [2], [3]] output_shape [1, 257, 4, 256] : tensor<257x4x256xf32> into tensor<1x257x4x256xf32>
%expanded_3 = tensor.expand_shape %cst_1 [[0, 1]] output_shape [1, 257] : tensor<257xf32> into tensor<1x257xf32>
%4 = tensor.empty() : tensor<1x257x4x256xf32>
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%expanded, %expanded_3 : tensor<1x257x4x256xf32>, tensor<1x257xf32>) outs(%4 : tensor<1x257x4x256xf32>) {
^bb0(%in: f32, %in_4: f32, %out: f32):
%8 = arith.addf %in, %in_4 : f32
linalg.yield %8 : f32
} -> tensor<1x257x4x256xf32>
%6 = hal.tensor.barrier join(%5 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%7 = hal.tensor.export %6 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After TransposeGenericOpsPass (iree-dispatch-creation-transpose-generic-ops) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After FormScalarDispatchesPass (iree-dispatch-creation-form-scalar-dispatches) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After SinkReshapesPass (iree-dispatch-creation-sink-reshapes) //----- //
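// Note: SinkReshapes moved the tensor.expand_shape below the bias add: the add now runs on the
// collapsed tensor<257x4x256xf32> and broadcasts the tensor<257xf32> bias via the
// (d0, d1, d2) -> (d0) map, and only the final result is expanded back to 1x257x4x256.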
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<257x4x256xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2], [3]] : tensor<1x128x4x256xf32> into tensor<128x4x256xf32>
%collapsed_2 = tensor.collapse_shape %cst_0 [[0], [1, 2, 3]] : tensor<257x128x1x1xf32> into tensor<257x128xf32>
%2 = linalg.fill ins(%cst : f32) outs(%1 : tensor<257x4x256xf32>) -> tensor<257x4x256xf32>
%3 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d3, d1, d2)>, affine_map<(d0, d1, d2, d3) -> (d0, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel", "reduction"]} ins(%collapsed, %collapsed_2 : tensor<128x4x256xf32>, tensor<257x128xf32>) outs(%2 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.mulf %in, %in_3 : f32
%9 = arith.addf %out, %8 : f32
linalg.yield %9 : f32
} -> tensor<257x4x256xf32>
%4 = tensor.empty() : tensor<257x4x256xf32>
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3, %cst_1 : tensor<257x4x256xf32>, tensor<257xf32>) outs(%4 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.addf %in, %in_3 : f32
linalg.yield %8 : f32
} -> tensor<257x4x256xf32>
%expanded = tensor.expand_shape %5 [[0, 1], [2], [3]] output_shape [1, 257, 4, 256] : tensor<257x4x256xf32> into tensor<1x257x4x256xf32>
%6 = hal.tensor.barrier join(%expanded : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%7 = hal.tensor.export %6 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After FormDispatchRegionsPass (iree-dispatch-creation-form-dispatch-regions) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After CloneProducersIntoDispatchRegionsPass (iree-dispatch-creation-clone-producers-into-dispatch-regions) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After CollapseDimensionsPass (iree-dispatch-creation-collapse-dimensions) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<257x4x256xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2], [3]] : tensor<1x128x4x256xf32> into tensor<128x4x256xf32>
%collapsed_2 = tensor.collapse_shape %cst_0 [[0], [1, 2, 3]] : tensor<257x128x1x1xf32> into tensor<257x128xf32>
%2 = linalg.fill ins(%cst : f32) outs(%1 : tensor<257x4x256xf32>) -> tensor<257x4x256xf32>
%3 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d3, d1, d2)>, affine_map<(d0, d1, d2, d3) -> (d0, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel", "reduction"]} ins(%collapsed, %collapsed_2 : tensor<128x4x256xf32>, tensor<257x128xf32>) outs(%2 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.mulf %in, %in_3 : f32
%9 = arith.addf %out, %8 : f32
linalg.yield %9 : f32
} -> tensor<257x4x256xf32>
%4 = tensor.empty() : tensor<257x4x256xf32>
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3, %cst_1 : tensor<257x4x256xf32>, tensor<257xf32>) outs(%4 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.addf %in, %in_3 : f32
linalg.yield %8 : f32
} -> tensor<257x4x256xf32>
%expanded = tensor.expand_shape %5 [[0, 1], [2], [3]] output_shape [1, 257, 4, 256] : tensor<257x4x256xf32> into tensor<1x257x4x256xf32>
%6 = hal.tensor.barrier join(%expanded : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%7 = hal.tensor.export %6 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After ConvertDispatchRegionsToWorkgroupsPass (iree-dispatch-creation-convert-dispatch-regions-to-workgroups) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
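// Note: CSE reuses the single tensor.empty() : tensor<257x4x256xf32> (%1) as the outs of the
// bias-add generic, dropping the second empty tensor seen in the previous $async dump.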
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<257x4x256xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2], [3]] : tensor<1x128x4x256xf32> into tensor<128x4x256xf32>
%collapsed_2 = tensor.collapse_shape %cst_0 [[0], [1, 2, 3]] : tensor<257x128x1x1xf32> into tensor<257x128xf32>
%2 = linalg.fill ins(%cst : f32) outs(%1 : tensor<257x4x256xf32>) -> tensor<257x4x256xf32>
%3 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d3, d1, d2)>, affine_map<(d0, d1, d2, d3) -> (d0, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel", "reduction"]} ins(%collapsed, %collapsed_2 : tensor<128x4x256xf32>, tensor<257x128xf32>) outs(%2 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%7 = arith.mulf %in, %in_3 : f32
%8 = arith.addf %out, %7 : f32
linalg.yield %8 : f32
} -> tensor<257x4x256xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3, %cst_1 : tensor<257x4x256xf32>, tensor<257xf32>) outs(%1 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%7 = arith.addf %in, %in_3 : f32
linalg.yield %7 : f32
} -> tensor<257x4x256xf32>
%expanded = tensor.expand_shape %4 [[0, 1], [2], [3]] output_shape [1, 257, 4, 256] : tensor<257x4x256xf32> into tensor<1x257x4x256xf32>
%5 = hal.tensor.barrier join(%expanded : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%6 = hal.tensor.export %5 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
// -----// IR Dump After ConvertTensorToFlowPass (iree-dispatch-creation-convert-tensor-to-flow) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After FuseMultiUseElementwiseProducerPass (iree-dispatch-creation-fuse-multi-use-elementwise-producer) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<257x4x256xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2], [3]] : tensor<1x128x4x256xf32> into tensor<128x4x256xf32>
%collapsed_2 = tensor.collapse_shape %cst_0 [[0], [1, 2, 3]] : tensor<257x128x1x1xf32> into tensor<257x128xf32>
%2 = linalg.fill ins(%cst : f32) outs(%1 : tensor<257x4x256xf32>) -> tensor<257x4x256xf32>
%3 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d3, d1, d2)>, affine_map<(d0, d1, d2, d3) -> (d0, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel", "reduction"]} ins(%collapsed, %collapsed_2 : tensor<128x4x256xf32>, tensor<257x128xf32>) outs(%2 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%7 = arith.mulf %in, %in_3 : f32
%8 = arith.addf %out, %7 : f32
linalg.yield %8 : f32
} -> tensor<257x4x256xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3, %cst_1 : tensor<257x4x256xf32>, tensor<257xf32>) outs(%1 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%7 = arith.addf %in, %in_3 : f32
linalg.yield %7 : f32
} -> tensor<257x4x256xf32>
%expanded = tensor.expand_shape %4 [[0, 1], [2], [3]] output_shape [1, 257, 4, 256] : tensor<257x4x256xf32> into tensor<1x257x4x256xf32>
%5 = hal.tensor.barrier join(%expanded : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%6 = hal.tensor.export %5 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After MaterializeDefaultWorkgroupCountRegionPass (iree-dispatch-creation-materialize-default-workgroup-count-region) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<257x4x256xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2], [3]] : tensor<1x128x4x256xf32> into tensor<128x4x256xf32>
%collapsed_2 = tensor.collapse_shape %cst_0 [[0], [1, 2, 3]] : tensor<257x128x1x1xf32> into tensor<257x128xf32>
%2 = linalg.fill ins(%cst : f32) outs(%1 : tensor<257x4x256xf32>) -> tensor<257x4x256xf32>
%3 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d3, d1, d2)>, affine_map<(d0, d1, d2, d3) -> (d0, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel", "reduction"]} ins(%collapsed, %collapsed_2 : tensor<128x4x256xf32>, tensor<257x128xf32>) outs(%2 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%7 = arith.mulf %in, %in_3 : f32
%8 = arith.addf %out, %7 : f32
linalg.yield %8 : f32
} -> tensor<257x4x256xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3, %cst_1 : tensor<257x4x256xf32>, tensor<257xf32>) outs(%1 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%7 = arith.addf %in, %in_3 : f32
linalg.yield %7 : f32
} -> tensor<257x4x256xf32>
%expanded = tensor.expand_shape %4 [[0, 1], [2], [3]] output_shape [1, 257, 4, 256] : tensor<257x4x256xf32> into tensor<1x257x4x256xf32>
%5 = hal.tensor.barrier join(%expanded : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%6 = hal.tensor.export %5 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<257x4x256xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2], [3]] : tensor<1x128x4x256xf32> into tensor<128x4x256xf32>
%collapsed_2 = tensor.collapse_shape %cst_0 [[0], [1, 2, 3]] : tensor<257x128x1x1xf32> into tensor<257x128xf32>
%2 = linalg.fill ins(%cst : f32) outs(%1 : tensor<257x4x256xf32>) -> tensor<257x4x256xf32>
%3 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d3, d1, d2)>, affine_map<(d0, d1, d2, d3) -> (d0, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel", "reduction"]} ins(%collapsed, %collapsed_2 : tensor<128x4x256xf32>, tensor<257x128xf32>) outs(%2 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%7 = arith.mulf %in, %in_3 : f32
%8 = arith.addf %out, %7 : f32
linalg.yield %8 : f32
} -> tensor<257x4x256xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3, %cst_1 : tensor<257x4x256xf32>, tensor<257xf32>) outs(%1 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%7 = arith.addf %in, %in_3 : f32
linalg.yield %7 : f32
} -> tensor<257x4x256xf32>
%expanded = tensor.expand_shape %4 [[0, 1], [2], [3]] output_shape [1, 257, 4, 256] : tensor<257x4x256xf32> into tensor<1x257x4x256xf32>
%5 = hal.tensor.barrier join(%expanded : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%6 = hal.tensor.export %5 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
// -----// IR Dump After SplitReductionPass (iree-dispatch-creation-split-reduction-ops) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<257x4x256xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2], [3]] : tensor<1x128x4x256xf32> into tensor<128x4x256xf32>
%collapsed_2 = tensor.collapse_shape %cst_0 [[0], [1, 2, 3]] : tensor<257x128x1x1xf32> into tensor<257x128xf32>
%2 = linalg.fill ins(%cst : f32) outs(%1 : tensor<257x4x256xf32>) -> tensor<257x4x256xf32>
%3 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d3, d1, d2)>, affine_map<(d0, d1, d2, d3) -> (d0, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel", "reduction"]} ins(%collapsed, %collapsed_2 : tensor<128x4x256xf32>, tensor<257x128xf32>) outs(%2 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%7 = arith.mulf %in, %in_3 : f32
%8 = arith.addf %out, %7 : f32
linalg.yield %8 : f32
} -> tensor<257x4x256xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3, %cst_1 : tensor<257x4x256xf32>, tensor<257xf32>) outs(%1 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%7 = arith.addf %in, %in_3 : f32
linalg.yield %7 : f32
} -> tensor<257x4x256xf32>
%expanded = tensor.expand_shape %4 [[0, 1], [2], [3]] output_shape [1, 257, 4, 256] : tensor<257x4x256xf32> into tensor<1x257x4x256xf32>
%5 = hal.tensor.barrier join(%expanded : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%6 = hal.tensor.export %5 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
// -----// IR Dump After TransposeGenericOpsPass (iree-dispatch-creation-transpose-generic-ops) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<257x4x256xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2], [3]] : tensor<1x128x4x256xf32> into tensor<128x4x256xf32>
%collapsed_2 = tensor.collapse_shape %cst_0 [[0], [1, 2, 3]] : tensor<257x128x1x1xf32> into tensor<257x128xf32>
%2 = linalg.fill ins(%cst : f32) outs(%1 : tensor<257x4x256xf32>) -> tensor<257x4x256xf32>
%3 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d3, d1, d2)>, affine_map<(d0, d1, d2, d3) -> (d0, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel", "reduction"]} ins(%collapsed, %collapsed_2 : tensor<128x4x256xf32>, tensor<257x128xf32>) outs(%2 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%7 = arith.mulf %in, %in_3 : f32
%8 = arith.addf %out, %7 : f32
linalg.yield %8 : f32
} -> tensor<257x4x256xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3, %cst_1 : tensor<257x4x256xf32>, tensor<257xf32>) outs(%1 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%7 = arith.addf %in, %in_3 : f32
linalg.yield %7 : f32
} -> tensor<257x4x256xf32>
%expanded = tensor.expand_shape %4 [[0, 1], [2], [3]] output_shape [1, 257, 4, 256] : tensor<257x4x256xf32> into tensor<1x257x4x256xf32>
%5 = hal.tensor.barrier join(%expanded : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%6 = hal.tensor.export %5 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
// -----// IR Dump After FormScalarDispatchesPass (iree-dispatch-creation-form-scalar-dispatches) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<257x4x256xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2], [3]] : tensor<1x128x4x256xf32> into tensor<128x4x256xf32>
%collapsed_2 = tensor.collapse_shape %cst_0 [[0], [1, 2, 3]] : tensor<257x128x1x1xf32> into tensor<257x128xf32>
%2 = linalg.fill ins(%cst : f32) outs(%1 : tensor<257x4x256xf32>) -> tensor<257x4x256xf32>
%3 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d3, d1, d2)>, affine_map<(d0, d1, d2, d3) -> (d0, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel", "reduction"]} ins(%collapsed, %collapsed_2 : tensor<128x4x256xf32>, tensor<257x128xf32>) outs(%2 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%7 = arith.mulf %in, %in_3 : f32
%8 = arith.addf %out, %7 : f32
linalg.yield %8 : f32
} -> tensor<257x4x256xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3, %cst_1 : tensor<257x4x256xf32>, tensor<257xf32>) outs(%1 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%7 = arith.addf %in, %in_3 : f32
linalg.yield %7 : f32
} -> tensor<257x4x256xf32>
%expanded = tensor.expand_shape %4 [[0, 1], [2], [3]] output_shape [1, 257, 4, 256] : tensor<257x4x256xf32> into tensor<1x257x4x256xf32>
%5 = hal.tensor.barrier join(%expanded : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%6 = hal.tensor.export %5 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
// -----// IR Dump After FormDispatchRegionsPass (iree-dispatch-creation-form-dispatch-regions) //----- //
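// Note: the contraction and the bias-add generics are now grouped into one flow.dispatch.region;
// the collapse_shape producers, the linalg.fill, and the hal import/export remain outside it.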
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<257x4x256xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2], [3]] : tensor<1x128x4x256xf32> into tensor<128x4x256xf32>
%collapsed_2 = tensor.collapse_shape %cst_0 [[0], [1, 2, 3]] : tensor<257x128x1x1xf32> into tensor<257x128xf32>
%2 = linalg.fill ins(%cst : f32) outs(%1 : tensor<257x4x256xf32>) -> tensor<257x4x256xf32>
%3 = flow.dispatch.region -> (tensor<257x4x256xf32>) {
%6 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d3, d1, d2)>, affine_map<(d0, d1, d2, d3) -> (d0, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel", "reduction"]} ins(%collapsed, %collapsed_2 : tensor<128x4x256xf32>, tensor<257x128xf32>) outs(%2 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.mulf %in, %in_3 : f32
%9 = arith.addf %out, %8 : f32
linalg.yield %9 : f32
} -> tensor<257x4x256xf32>
%7 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%6, %cst_1 : tensor<257x4x256xf32>, tensor<257xf32>) outs(%1 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%8 = arith.addf %in, %in_3 : f32
linalg.yield %8 : f32
} -> tensor<257x4x256xf32>
flow.return %7 : tensor<257x4x256xf32>
}
%expanded = tensor.expand_shape %3 [[0, 1], [2], [3]] output_shape [1, 257, 4, 256] : tensor<257x4x256xf32> into tensor<1x257x4x256xf32>
%4 = hal.tensor.barrier join(%expanded : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%5 = hal.tensor.export %4 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
// -----// IR Dump After CloneProducersIntoDispatchRegionsPass (iree-dispatch-creation-clone-producers-into-dispatch-regions) //----- //
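// Note: the tensor.empty, zero constant, and linalg.fill initializing the accumulator are cloned
// into the dispatch region, so it only captures the two collapsed operands and the bias constant.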
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<257x4x256xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2], [3]] : tensor<1x128x4x256xf32> into tensor<128x4x256xf32>
%collapsed_2 = tensor.collapse_shape %cst_0 [[0], [1, 2, 3]] : tensor<257x128x1x1xf32> into tensor<257x128xf32>
%2 = flow.dispatch.region -> (tensor<257x4x256xf32>) {
%5 = tensor.empty() : tensor<257x4x256xf32>
%cst_3 = arith.constant 0.000000e+00 : f32
%6 = linalg.fill ins(%cst_3 : f32) outs(%5 : tensor<257x4x256xf32>) -> tensor<257x4x256xf32>
%7 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d3, d1, d2)>, affine_map<(d0, d1, d2, d3) -> (d0, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel", "reduction"]} ins(%collapsed, %collapsed_2 : tensor<128x4x256xf32>, tensor<257x128xf32>) outs(%6 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_4: f32, %out: f32):
%9 = arith.mulf %in, %in_4 : f32
%10 = arith.addf %out, %9 : f32
linalg.yield %10 : f32
} -> tensor<257x4x256xf32>
%8 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%7, %cst_1 : tensor<257x4x256xf32>, tensor<257xf32>) outs(%5 : tensor<257x4x256xf32>) {
^bb0(%in: f32, %in_4: f32, %out: f32):
%9 = arith.addf %in, %in_4 : f32
linalg.yield %9 : f32
} -> tensor<257x4x256xf32>
flow.return %8 : tensor<257x4x256xf32>
}
%expanded = tensor.expand_shape %2 [[0, 1], [2], [3]] output_shape [1, 257, 4, 256] : tensor<257x4x256xf32> into tensor<1x257x4x256xf32>
%3 = hal.tensor.barrier join(%expanded : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%4 = hal.tensor.export %3 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After CollapseDimensionsPass (iree-dispatch-creation-collapse-dimensions) //----- //
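// Note: the trailing 4x256 dims are collapsed into a single 1024 dim inside the dispatch, turning
// the contraction into a matmul-shaped 257x128 x 128x1024 -> 257x1024 generic; the expand_shapes
// back to 1x257x4x256 stay outside the region.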
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<257x4x256xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2], [3]] : tensor<1x128x4x256xf32> into tensor<128x4x256xf32>
%collapsed_2 = tensor.collapse_shape %cst_0 [[0], [1, 2, 3]] : tensor<257x128x1x1xf32> into tensor<257x128xf32>
%collapsed_3 = tensor.collapse_shape %collapsed [[0], [1, 2]] : tensor<128x4x256xf32> into tensor<128x1024xf32>
%2 = flow.dispatch.region -> (tensor<257x1024xf32>) {
%cst_5 = arith.constant 0.000000e+00 : f32
%5 = tensor.empty() : tensor<257x1024xf32>
%6 = linalg.fill ins(%cst_5 : f32) outs(%5 : tensor<257x1024xf32>) -> tensor<257x1024xf32>
%7 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%collapsed_3, %collapsed_2 : tensor<128x1024xf32>, tensor<257x128xf32>) outs(%6 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_7: f32, %out: f32):
%10 = arith.mulf %in, %in_7 : f32
%11 = arith.addf %out, %10 : f32
linalg.yield %11 : f32
} -> tensor<257x1024xf32>
%8 = tensor.empty() : tensor<257x1024xf32>
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%7, %cst_1 : tensor<257x1024xf32>, tensor<257xf32>) outs(%8 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_7: f32, %out: f32):
%10 = arith.addf %in, %in_7 : f32
linalg.yield %10 : f32
} -> tensor<257x1024xf32>
%expanded_6 = tensor.expand_shape %9 [[0], [1, 2]] output_shape [257, 4, 256] : tensor<257x1024xf32> into tensor<257x4x256xf32>
flow.return %9 : tensor<257x1024xf32>
}
%expanded = tensor.expand_shape %2 [[0], [1, 2]] output_shape [257, 4, 256] : tensor<257x1024xf32> into tensor<257x4x256xf32>
%expanded_4 = tensor.expand_shape %expanded [[0, 1], [2], [3]] output_shape [1, 257, 4, 256] : tensor<257x4x256xf32> into tensor<1x257x4x256xf32>
%3 = hal.tensor.barrier join(%expanded_4 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%4 = hal.tensor.export %3 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After ConvertDispatchRegionsToWorkgroupsPass (iree-dispatch-creation-convert-dispatch-regions-to-workgroups) //----- //
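// Note: the flow.dispatch.region is rewritten as flow.dispatch.workgroups, whose body reads its
// operands through !flow.dispatch.tensor bindings (flow.dispatch.tensor.load) and writes the
// 257x1024 result with flow.dispatch.tensor.store.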
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_1 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = tensor.empty() : tensor<257x4x256xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2], [3]] : tensor<1x128x4x256xf32> into tensor<128x4x256xf32>
%collapsed_2 = tensor.collapse_shape %cst_0 [[0], [1, 2, 3]] : tensor<257x128x1x1xf32> into tensor<257x128xf32>
%collapsed_3 = tensor.collapse_shape %collapsed [[0], [1, 2]] : tensor<128x4x256xf32> into tensor<128x1024xf32>
%2 = flow.dispatch.workgroups(%collapsed_3, %collapsed_2, %cst_1) : (tensor<128x1024xf32>, tensor<257x128xf32>, tensor<257xf32>) -> tensor<257x1024xf32> =
(%arg3: !flow.dispatch.tensor<readonly:tensor<128x1024xf32>>, %arg4: !flow.dispatch.tensor<readonly:tensor<257x128xf32>>, %arg5: !flow.dispatch.tensor<readonly:tensor<257xf32>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>) {
%5 = flow.dispatch.tensor.load %arg3, offsets = [0, 0], sizes = [128, 1024], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x1024xf32>> -> tensor<128x1024xf32>
%6 = flow.dispatch.tensor.load %arg4, offsets = [0, 0], sizes = [257, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<257x128xf32>> -> tensor<257x128xf32>
%7 = flow.dispatch.tensor.load %arg5, offsets = [0], sizes = [257], strides = [1] : !flow.dispatch.tensor<readonly:tensor<257xf32>> -> tensor<257xf32>
%cst_5 = arith.constant 0.000000e+00 : f32
%8 = tensor.empty() : tensor<257x1024xf32>
%9 = linalg.fill ins(%cst_5 : f32) outs(%8 : tensor<257x1024xf32>) -> tensor<257x1024xf32>
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%5, %6 : tensor<128x1024xf32>, tensor<257x128xf32>) outs(%9 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_7: f32, %out: f32):
%13 = arith.mulf %in, %in_7 : f32
%14 = arith.addf %out, %13 : f32
linalg.yield %14 : f32
} -> tensor<257x1024xf32>
%11 = tensor.empty() : tensor<257x1024xf32>
%12 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%10, %7 : tensor<257x1024xf32>, tensor<257xf32>) outs(%11 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_7: f32, %out: f32):
%13 = arith.addf %in, %in_7 : f32
linalg.yield %13 : f32
} -> tensor<257x1024xf32>
%expanded_6 = tensor.expand_shape %12 [[0], [1, 2]] output_shape [257, 4, 256] : tensor<257x1024xf32> into tensor<257x4x256xf32>
flow.dispatch.tensor.store %12, %arg6, offsets = [0, 0], sizes = [257, 1024], strides = [1, 1] : tensor<257x1024xf32> -> !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>
flow.return
}
%expanded = tensor.expand_shape %2 [[0], [1, 2]] output_shape [257, 4, 256] : tensor<257x1024xf32> into tensor<257x4x256xf32>
%expanded_4 = tensor.expand_shape %expanded [[0, 1], [2], [3]] output_shape [1, 257, 4, 256] : tensor<257x4x256xf32> into tensor<1x257x4x256xf32>
%3 = hal.tensor.barrier join(%expanded_4 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%4 = hal.tensor.export %3 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After ConvertTensorToFlowPass (iree-dispatch-creation-convert-tensor-to-flow) //----- //
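// Note: the collapse_shape/expand_shape ops outside the dispatch are converted to
// flow.tensor.reshape on the weight constant, the imported input, and the 257x1024 result.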
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = flow.tensor.reshape %cst : tensor<257x128x1x1xf32> -> tensor<257x128xf32>
%2 = flow.tensor.reshape %0 : tensor<1x128x4x256xf32> -> tensor<128x1024xf32>
%3 = flow.dispatch.workgroups(%2, %1, %cst_0) : (tensor<128x1024xf32>, tensor<257x128xf32>, tensor<257xf32>) -> tensor<257x1024xf32> =
(%arg3: !flow.dispatch.tensor<readonly:tensor<128x1024xf32>>, %arg4: !flow.dispatch.tensor<readonly:tensor<257x128xf32>>, %arg5: !flow.dispatch.tensor<readonly:tensor<257xf32>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>) {
%cst_1 = arith.constant 0.000000e+00 : f32
%7 = flow.dispatch.tensor.load %arg3, offsets = [0, 0], sizes = [128, 1024], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x1024xf32>> -> tensor<128x1024xf32>
%8 = flow.dispatch.tensor.load %arg4, offsets = [0, 0], sizes = [257, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<257x128xf32>> -> tensor<257x128xf32>
%9 = flow.dispatch.tensor.load %arg5, offsets = [0], sizes = [257], strides = [1] : !flow.dispatch.tensor<readonly:tensor<257xf32>> -> tensor<257xf32>
%10 = tensor.empty() : tensor<257x1024xf32>
%11 = linalg.fill ins(%cst_1 : f32) outs(%10 : tensor<257x1024xf32>) -> tensor<257x1024xf32>
%12 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%7, %8 : tensor<128x1024xf32>, tensor<257x128xf32>) outs(%11 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_2: f32, %out: f32):
%15 = arith.mulf %in, %in_2 : f32
%16 = arith.addf %out, %15 : f32
linalg.yield %16 : f32
} -> tensor<257x1024xf32>
%13 = tensor.empty() : tensor<257x1024xf32>
%14 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%12, %9 : tensor<257x1024xf32>, tensor<257xf32>) outs(%13 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_2: f32, %out: f32):
%15 = arith.addf %in, %in_2 : f32
linalg.yield %15 : f32
} -> tensor<257x1024xf32>
flow.dispatch.tensor.store %14, %arg6, offsets = [0, 0], sizes = [257, 1024], strides = [1, 1] : tensor<257x1024xf32> -> !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>
flow.return
}
%4 = flow.tensor.reshape %3 : tensor<257x1024xf32> -> tensor<1x257x4x256xf32>
%5 = hal.tensor.barrier join(%4 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%6 = hal.tensor.export %5 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
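// Note: CSE folds the two identical tensor.empty() : tensor<257x1024xf32> ops in the workgroup
// body into one, so both the linalg.fill and the bias-add generic use %10 as their outs init.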
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = flow.tensor.reshape %cst : tensor<257x128x1x1xf32> -> tensor<257x128xf32>
%2 = flow.tensor.reshape %0 : tensor<1x128x4x256xf32> -> tensor<128x1024xf32>
%3 = flow.dispatch.workgroups(%2, %1, %cst_0) : (tensor<128x1024xf32>, tensor<257x128xf32>, tensor<257xf32>) -> tensor<257x1024xf32> =
(%arg3: !flow.dispatch.tensor<readonly:tensor<128x1024xf32>>, %arg4: !flow.dispatch.tensor<readonly:tensor<257x128xf32>>, %arg5: !flow.dispatch.tensor<readonly:tensor<257xf32>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>) {
%cst_1 = arith.constant 0.000000e+00 : f32
%7 = flow.dispatch.tensor.load %arg3, offsets = [0, 0], sizes = [128, 1024], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x1024xf32>> -> tensor<128x1024xf32>
%8 = flow.dispatch.tensor.load %arg4, offsets = [0, 0], sizes = [257, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<257x128xf32>> -> tensor<257x128xf32>
%9 = flow.dispatch.tensor.load %arg5, offsets = [0], sizes = [257], strides = [1] : !flow.dispatch.tensor<readonly:tensor<257xf32>> -> tensor<257xf32>
%10 = tensor.empty() : tensor<257x1024xf32>
%11 = linalg.fill ins(%cst_1 : f32) outs(%10 : tensor<257x1024xf32>) -> tensor<257x1024xf32>
%12 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%7, %8 : tensor<128x1024xf32>, tensor<257x128xf32>) outs(%11 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_2: f32, %out: f32):
%14 = arith.mulf %in, %in_2 : f32
%15 = arith.addf %out, %14 : f32
linalg.yield %15 : f32
} -> tensor<257x1024xf32>
%13 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%12, %9 : tensor<257x1024xf32>, tensor<257xf32>) outs(%10 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_2: f32, %out: f32):
%14 = arith.addf %in, %in_2 : f32
linalg.yield %14 : f32
} -> tensor<257x1024xf32>
flow.dispatch.tensor.store %13, %arg6, offsets = [0, 0], sizes = [257, 1024], strides = [1, 1] : tensor<257x1024xf32> -> !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>
flow.return
}
%4 = flow.tensor.reshape %3 : tensor<257x1024xf32> -> tensor<1x257x4x256xf32>
%5 = hal.tensor.barrier join(%4 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%6 = hal.tensor.export %5 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = flow.tensor.reshape %cst : tensor<257x128x1x1xf32> -> tensor<257x128xf32>
%2 = flow.tensor.reshape %0 : tensor<1x128x4x256xf32> -> tensor<128x1024xf32>
%3 = flow.dispatch.workgroups(%2, %1, %cst_0) : (tensor<128x1024xf32>, tensor<257x128xf32>, tensor<257xf32>) -> tensor<257x1024xf32> =
(%arg3: !flow.dispatch.tensor<readonly:tensor<128x1024xf32>>, %arg4: !flow.dispatch.tensor<readonly:tensor<257x128xf32>>, %arg5: !flow.dispatch.tensor<readonly:tensor<257xf32>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>) {
%cst_1 = arith.constant 0.000000e+00 : f32
%7 = flow.dispatch.tensor.load %arg3, offsets = [0, 0], sizes = [128, 1024], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x1024xf32>> -> tensor<128x1024xf32>
%8 = flow.dispatch.tensor.load %arg4, offsets = [0, 0], sizes = [257, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<257x128xf32>> -> tensor<257x128xf32>
%9 = flow.dispatch.tensor.load %arg5, offsets = [0], sizes = [257], strides = [1] : !flow.dispatch.tensor<readonly:tensor<257xf32>> -> tensor<257xf32>
%10 = tensor.empty() : tensor<257x1024xf32>
%11 = linalg.fill ins(%cst_1 : f32) outs(%10 : tensor<257x1024xf32>) -> tensor<257x1024xf32>
%12 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%7, %8 : tensor<128x1024xf32>, tensor<257x128xf32>) outs(%11 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_2: f32, %out: f32):
%14 = arith.mulf %in, %in_2 : f32
%15 = arith.addf %out, %14 : f32
linalg.yield %15 : f32
} -> tensor<257x1024xf32>
%13 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%12, %9 : tensor<257x1024xf32>, tensor<257xf32>) outs(%10 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_2: f32, %out: f32):
%14 = arith.addf %in, %in_2 : f32
linalg.yield %14 : f32
} -> tensor<257x1024xf32>
flow.dispatch.tensor.store %13, %arg6, offsets = [0, 0], sizes = [257, 1024], strides = [1, 1] : tensor<257x1024xf32> -> !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>
flow.return
}
%4 = flow.tensor.reshape %3 : tensor<257x1024xf32> -> tensor<1x257x4x256xf32>
%5 = hal.tensor.barrier join(%4 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%6 = hal.tensor.export %5 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
// -----// IR Dump After MaterializeDefaultWorkgroupCountRegionPass (iree-dispatch-creation-materialize-default-workgroup-count-region) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = flow.tensor.reshape %cst : tensor<257x128x1x1xf32> -> tensor<257x128xf32>
%2 = flow.tensor.reshape %0 : tensor<1x128x4x256xf32> -> tensor<128x1024xf32>
%3 = flow.dispatch.workgroups(%2, %1, %cst_0) : (tensor<128x1024xf32>, tensor<257x128xf32>, tensor<257xf32>) -> tensor<257x1024xf32> =
(%arg3: !flow.dispatch.tensor<readonly:tensor<128x1024xf32>>, %arg4: !flow.dispatch.tensor<readonly:tensor<257x128xf32>>, %arg5: !flow.dispatch.tensor<readonly:tensor<257xf32>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>) {
%cst_1 = arith.constant 0.000000e+00 : f32
%7 = flow.dispatch.tensor.load %arg3, offsets = [0, 0], sizes = [128, 1024], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x1024xf32>> -> tensor<128x1024xf32>
%8 = flow.dispatch.tensor.load %arg4, offsets = [0, 0], sizes = [257, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<257x128xf32>> -> tensor<257x128xf32>
%9 = flow.dispatch.tensor.load %arg5, offsets = [0], sizes = [257], strides = [1] : !flow.dispatch.tensor<readonly:tensor<257xf32>> -> tensor<257xf32>
%10 = tensor.empty() : tensor<257x1024xf32>
%11 = linalg.fill ins(%cst_1 : f32) outs(%10 : tensor<257x1024xf32>) -> tensor<257x1024xf32>
%12 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%7, %8 : tensor<128x1024xf32>, tensor<257x128xf32>) outs(%11 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_2: f32, %out: f32):
%14 = arith.mulf %in, %in_2 : f32
%15 = arith.addf %out, %14 : f32
linalg.yield %15 : f32
} -> tensor<257x1024xf32>
%13 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%12, %9 : tensor<257x1024xf32>, tensor<257xf32>) outs(%10 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_2: f32, %out: f32):
%14 = arith.addf %in, %in_2 : f32
linalg.yield %14 : f32
} -> tensor<257x1024xf32>
flow.dispatch.tensor.store %13, %arg6, offsets = [0, 0], sizes = [257, 1024], strides = [1, 1] : tensor<257x1024xf32> -> !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>
flow.return
} count() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
%4 = flow.tensor.reshape %3 : tensor<257x1024xf32> -> tensor<1x257x4x256xf32>
%5 = hal.tensor.barrier join(%4 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%6 = hal.tensor.export %5 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
// -----// IR Dump After VerifyInputLegalityPass (iree-verify-input-legality) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d2, d1)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d2)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
#map3 = affine_map<(d0, d1) -> (d0, d1)>
#map4 = affine_map<(d0, d1) -> (d0)>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = flow.tensor.reshape %cst : tensor<257x128x1x1xf32> -> tensor<257x128xf32>
%2 = flow.tensor.reshape %0 : tensor<1x128x4x256xf32> -> tensor<128x1024xf32>
%3 = flow.dispatch.workgroups(%2, %1, %cst_0) : (tensor<128x1024xf32>, tensor<257x128xf32>, tensor<257xf32>) -> tensor<257x1024xf32> =
(%arg3: !flow.dispatch.tensor<readonly:tensor<128x1024xf32>>, %arg4: !flow.dispatch.tensor<readonly:tensor<257x128xf32>>, %arg5: !flow.dispatch.tensor<readonly:tensor<257xf32>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>) {
%cst_1 = arith.constant 0.000000e+00 : f32
%7 = flow.dispatch.tensor.load %arg3, offsets = [0, 0], sizes = [128, 1024], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x1024xf32>> -> tensor<128x1024xf32>
%8 = flow.dispatch.tensor.load %arg4, offsets = [0, 0], sizes = [257, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<257x128xf32>> -> tensor<257x128xf32>
%9 = flow.dispatch.tensor.load %arg5, offsets = [0], sizes = [257], strides = [1] : !flow.dispatch.tensor<readonly:tensor<257xf32>> -> tensor<257xf32>
%10 = tensor.empty() : tensor<257x1024xf32>
%11 = linalg.fill ins(%cst_1 : f32) outs(%10 : tensor<257x1024xf32>) -> tensor<257x1024xf32>
%12 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%7, %8 : tensor<128x1024xf32>, tensor<257x128xf32>) outs(%11 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_2: f32, %out: f32):
%14 = arith.mulf %in, %in_2 : f32
%15 = arith.addf %out, %14 : f32
linalg.yield %15 : f32
} -> tensor<257x1024xf32>
%13 = linalg.generic {indexing_maps = [#map3, #map4, #map3], iterator_types = ["parallel", "parallel"]} ins(%12, %9 : tensor<257x1024xf32>, tensor<257xf32>) outs(%10 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_2: f32, %out: f32):
%14 = arith.addf %in, %in_2 : f32
linalg.yield %14 : f32
} -> tensor<257x1024xf32>
flow.dispatch.tensor.store %13, %arg6, offsets = [0, 0], sizes = [257, 1024], strides = [1, 1] : tensor<257x1024xf32> -> !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>
flow.return
} count() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
%4 = flow.tensor.reshape %3 : tensor<257x1024xf32> -> tensor<1x257x4x256xf32>
%5 = hal.tensor.barrier join(%4 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%6 = hal.tensor.export %5 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
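// The @torch_jit wrapper above is the synchronous entry point generated around @torch_jit$async:
// it passes a null wait fence (nothing to wait on), creates a signal fence on device 0, invokes
// the async function, and blocks on that fence with timeout_millis(-1), i.e. waits forever.
// A loose host-side analogy in Python, using a threading.Event in place of a !hal.fence
// (illustrative only; this is not the IREE runtime API):
import threading

def torch_jit_async(arg, wait_fence, signal_fence):
    if wait_fence is not None:        # null wait fence: no dependency to wait on
        wait_fence.wait()
    result = arg                      # stand-in for the dispatched matmul work
    signal_fence.set()                # signal completion, like the fence joined by hal.tensor.barrier
    return result

def torch_jit(arg):
    signal_fence = threading.Event()  # hal.fence.create on the default device
    result = torch_jit_async(arg, None, signal_fence)
    signal_fence.wait(timeout=None)   # hal.fence.await with timeout_millis(-1)
    return result

print(torch_jit("buffer_view"))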
// -----// IR Dump After CaptureDynamicDimsPass (iree-flow-capture-dynamic-dims) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After CaptureDynamicDimsPass (iree-flow-capture-dynamic-dims) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = flow.tensor.reshape %cst : tensor<257x128x1x1xf32> -> tensor<257x128xf32>
%2 = flow.tensor.reshape %0 : tensor<1x128x4x256xf32> -> tensor<128x1024xf32>
%3 = flow.dispatch.workgroups(%2, %1, %cst_0) : (tensor<128x1024xf32>, tensor<257x128xf32>, tensor<257xf32>) -> tensor<257x1024xf32> =
(%arg3: !flow.dispatch.tensor<readonly:tensor<128x1024xf32>>, %arg4: !flow.dispatch.tensor<readonly:tensor<257x128xf32>>, %arg5: !flow.dispatch.tensor<readonly:tensor<257xf32>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>) {
%cst_1 = arith.constant 0.000000e+00 : f32
%7 = flow.dispatch.tensor.load %arg3, offsets = [0, 0], sizes = [128, 1024], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x1024xf32>> -> tensor<128x1024xf32>
%8 = flow.dispatch.tensor.load %arg4, offsets = [0, 0], sizes = [257, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<257x128xf32>> -> tensor<257x128xf32>
%9 = flow.dispatch.tensor.load %arg5, offsets = [0], sizes = [257], strides = [1] : !flow.dispatch.tensor<readonly:tensor<257xf32>> -> tensor<257xf32>
%10 = tensor.empty() : tensor<257x1024xf32>
%11 = linalg.fill ins(%cst_1 : f32) outs(%10 : tensor<257x1024xf32>) -> tensor<257x1024xf32>
%12 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%7, %8 : tensor<128x1024xf32>, tensor<257x128xf32>) outs(%11 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_2: f32, %out: f32):
%14 = arith.mulf %in, %in_2 : f32
%15 = arith.addf %out, %14 : f32
linalg.yield %15 : f32
} -> tensor<257x1024xf32>
%13 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%12, %9 : tensor<257x1024xf32>, tensor<257xf32>) outs(%10 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_2: f32, %out: f32):
%14 = arith.addf %in, %in_2 : f32
linalg.yield %14 : f32
} -> tensor<257x1024xf32>
flow.dispatch.tensor.store %13, %arg6, offsets = [0, 0], sizes = [257, 1024], strides = [1, 1] : tensor<257x1024xf32> -> !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>
flow.return
} count() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
%4 = flow.tensor.reshape %3 : tensor<257x1024xf32> -> tensor<1x257x4x256xf32>
%5 = hal.tensor.barrier join(%4 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%6 = hal.tensor.export %5 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = flow.tensor.reshape %cst : tensor<257x128x1x1xf32> -> tensor<257x128xf32>
%2 = flow.tensor.reshape %0 : tensor<1x128x4x256xf32> -> tensor<128x1024xf32>
%3 = flow.dispatch.workgroups(%2, %1, %cst_0) : (tensor<128x1024xf32>, tensor<257x128xf32>, tensor<257xf32>) -> tensor<257x1024xf32> =
(%arg3: !flow.dispatch.tensor<readonly:tensor<128x1024xf32>>, %arg4: !flow.dispatch.tensor<readonly:tensor<257x128xf32>>, %arg5: !flow.dispatch.tensor<readonly:tensor<257xf32>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>) {
%cst_1 = arith.constant 0.000000e+00 : f32
%7 = flow.dispatch.tensor.load %arg3, offsets = [0, 0], sizes = [128, 1024], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x1024xf32>> -> tensor<128x1024xf32>
%8 = flow.dispatch.tensor.load %arg4, offsets = [0, 0], sizes = [257, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<257x128xf32>> -> tensor<257x128xf32>
%9 = flow.dispatch.tensor.load %arg5, offsets = [0], sizes = [257], strides = [1] : !flow.dispatch.tensor<readonly:tensor<257xf32>> -> tensor<257xf32>
%10 = tensor.empty() : tensor<257x1024xf32>
%11 = linalg.fill ins(%cst_1 : f32) outs(%10 : tensor<257x1024xf32>) -> tensor<257x1024xf32>
%12 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%7, %8 : tensor<128x1024xf32>, tensor<257x128xf32>) outs(%11 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_2: f32, %out: f32):
%14 = arith.mulf %in, %in_2 : f32
%15 = arith.addf %out, %14 : f32
linalg.yield %15 : f32
} -> tensor<257x1024xf32>
%13 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%12, %9 : tensor<257x1024xf32>, tensor<257xf32>) outs(%10 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_2: f32, %out: f32):
%14 = arith.addf %in, %in_2 : f32
linalg.yield %14 : f32
} -> tensor<257x1024xf32>
flow.dispatch.tensor.store %13, %arg6, offsets = [0, 0], sizes = [257, 1024], strides = [1, 1] : tensor<257x1024xf32> -> !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>
flow.return
} count() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
%4 = flow.tensor.reshape %3 : tensor<257x1024xf32> -> tensor<1x257x4x256xf32>
%5 = hal.tensor.barrier join(%4 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%6 = hal.tensor.export %5 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = flow.tensor.reshape %cst : tensor<257x128x1x1xf32> -> tensor<257x128xf32>
%2 = flow.tensor.reshape %0 : tensor<1x128x4x256xf32> -> tensor<128x1024xf32>
%3 = flow.dispatch.workgroups(%2, %1, %cst_0) : (tensor<128x1024xf32>, tensor<257x128xf32>, tensor<257xf32>) -> tensor<257x1024xf32> =
(%arg3: !flow.dispatch.tensor<readonly:tensor<128x1024xf32>>, %arg4: !flow.dispatch.tensor<readonly:tensor<257x128xf32>>, %arg5: !flow.dispatch.tensor<readonly:tensor<257xf32>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>) {
%cst_1 = arith.constant 0.000000e+00 : f32
%7 = flow.dispatch.tensor.load %arg3, offsets = [0, 0], sizes = [128, 1024], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x1024xf32>> -> tensor<128x1024xf32>
%8 = flow.dispatch.tensor.load %arg4, offsets = [0, 0], sizes = [257, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<257x128xf32>> -> tensor<257x128xf32>
%9 = flow.dispatch.tensor.load %arg5, offsets = [0], sizes = [257], strides = [1] : !flow.dispatch.tensor<readonly:tensor<257xf32>> -> tensor<257xf32>
%10 = tensor.empty() : tensor<257x1024xf32>
%11 = linalg.fill ins(%cst_1 : f32) outs(%10 : tensor<257x1024xf32>) -> tensor<257x1024xf32>
%12 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%7, %8 : tensor<128x1024xf32>, tensor<257x128xf32>) outs(%11 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_2: f32, %out: f32):
%14 = arith.mulf %in, %in_2 : f32
%15 = arith.addf %out, %14 : f32
linalg.yield %15 : f32
} -> tensor<257x1024xf32>
%13 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%12, %9 : tensor<257x1024xf32>, tensor<257xf32>) outs(%10 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_2: f32, %out: f32):
%14 = arith.addf %in, %in_2 : f32
linalg.yield %14 : f32
} -> tensor<257x1024xf32>
flow.dispatch.tensor.store %13, %arg6, offsets = [0, 0], sizes = [257, 1024], strides = [1, 1] : tensor<257x1024xf32> -> !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>
flow.return
} count() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
%4 = flow.tensor.reshape %3 : tensor<257x1024xf32> -> tensor<1x257x4x256xf32>
%5 = hal.tensor.barrier join(%4 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%6 = hal.tensor.export %5 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
// -----// IR Dump After InitializeEmptyTensorsPass (iree-flow-initialize-empty-tensors) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After InitializeEmptyTensorsPass (iree-flow-initialize-empty-tensors) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = flow.tensor.reshape %cst : tensor<257x128x1x1xf32> -> tensor<257x128xf32>
%2 = flow.tensor.reshape %0 : tensor<1x128x4x256xf32> -> tensor<128x1024xf32>
%3 = flow.dispatch.workgroups(%2, %1, %cst_0) : (tensor<128x1024xf32>, tensor<257x128xf32>, tensor<257xf32>) -> tensor<257x1024xf32> =
(%arg3: !flow.dispatch.tensor<readonly:tensor<128x1024xf32>>, %arg4: !flow.dispatch.tensor<readonly:tensor<257x128xf32>>, %arg5: !flow.dispatch.tensor<readonly:tensor<257xf32>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>) {
%cst_1 = arith.constant 0.000000e+00 : f32
%7 = flow.dispatch.tensor.load %arg3, offsets = [0, 0], sizes = [128, 1024], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x1024xf32>> -> tensor<128x1024xf32>
%8 = flow.dispatch.tensor.load %arg4, offsets = [0, 0], sizes = [257, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<257x128xf32>> -> tensor<257x128xf32>
%9 = flow.dispatch.tensor.load %arg5, offsets = [0], sizes = [257], strides = [1] : !flow.dispatch.tensor<readonly:tensor<257xf32>> -> tensor<257xf32>
%10 = tensor.empty() : tensor<257x1024xf32>
%11 = linalg.fill ins(%cst_1 : f32) outs(%10 : tensor<257x1024xf32>) -> tensor<257x1024xf32>
%12 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%7, %8 : tensor<128x1024xf32>, tensor<257x128xf32>) outs(%11 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_2: f32, %out: f32):
%14 = arith.mulf %in, %in_2 : f32
%15 = arith.addf %out, %14 : f32
linalg.yield %15 : f32
} -> tensor<257x1024xf32>
%13 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%12, %9 : tensor<257x1024xf32>, tensor<257xf32>) outs(%10 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_2: f32, %out: f32):
%14 = arith.addf %in, %in_2 : f32
linalg.yield %14 : f32
} -> tensor<257x1024xf32>
flow.dispatch.tensor.store %13, %arg6, offsets = [0, 0], sizes = [257, 1024], strides = [1, 1] : tensor<257x1024xf32> -> !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>
flow.return
} count() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
%4 = flow.tensor.reshape %3 : tensor<257x1024xf32> -> tensor<1x257x4x256xf32>
%5 = hal.tensor.barrier join(%4 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%6 = hal.tensor.export %5 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
// -----// IR Dump After OutlineDispatchExternsPass (iree-flow-outline-dispatch-externs) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d2, d1)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d2)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
#map3 = affine_map<(d0, d1) -> (d0, d1)>
#map4 = affine_map<(d0, d1) -> (d0)>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = flow.tensor.reshape %cst : tensor<257x128x1x1xf32> -> tensor<257x128xf32>
%2 = flow.tensor.reshape %0 : tensor<1x128x4x256xf32> -> tensor<128x1024xf32>
%3 = flow.dispatch.workgroups(%2, %1, %cst_0) : (tensor<128x1024xf32>, tensor<257x128xf32>, tensor<257xf32>) -> tensor<257x1024xf32> =
(%arg3: !flow.dispatch.tensor<readonly:tensor<128x1024xf32>>, %arg4: !flow.dispatch.tensor<readonly:tensor<257x128xf32>>, %arg5: !flow.dispatch.tensor<readonly:tensor<257xf32>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>) {
%cst_1 = arith.constant 0.000000e+00 : f32
%7 = flow.dispatch.tensor.load %arg3, offsets = [0, 0], sizes = [128, 1024], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x1024xf32>> -> tensor<128x1024xf32>
%8 = flow.dispatch.tensor.load %arg4, offsets = [0, 0], sizes = [257, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<257x128xf32>> -> tensor<257x128xf32>
%9 = flow.dispatch.tensor.load %arg5, offsets = [0], sizes = [257], strides = [1] : !flow.dispatch.tensor<readonly:tensor<257xf32>> -> tensor<257xf32>
%10 = tensor.empty() : tensor<257x1024xf32>
%11 = linalg.fill ins(%cst_1 : f32) outs(%10 : tensor<257x1024xf32>) -> tensor<257x1024xf32>
%12 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%7, %8 : tensor<128x1024xf32>, tensor<257x128xf32>) outs(%11 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_2: f32, %out: f32):
%14 = arith.mulf %in, %in_2 : f32
%15 = arith.addf %out, %14 : f32
linalg.yield %15 : f32
} -> tensor<257x1024xf32>
%13 = linalg.generic {indexing_maps = [#map3, #map4, #map3], iterator_types = ["parallel", "parallel"]} ins(%12, %9 : tensor<257x1024xf32>, tensor<257xf32>) outs(%10 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_2: f32, %out: f32):
%14 = arith.addf %in, %in_2 : f32
linalg.yield %14 : f32
} -> tensor<257x1024xf32>
flow.dispatch.tensor.store %13, %arg6, offsets = [0, 0], sizes = [257, 1024], strides = [1, 1] : tensor<257x1024xf32> -> !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>
flow.return
} count() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
%4 = flow.tensor.reshape %3 : tensor<257x1024xf32> -> tensor<1x257x4x256xf32>
%5 = hal.tensor.barrier join(%4 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%6 = hal.tensor.export %5 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
// -----// IR Dump After OutlineDispatchRegionsPass (iree-flow-outline-dispatch-regions) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d2, d1)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d2)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
#map3 = affine_map<(d0, d1) -> (d0, d1)>
#map4 = affine_map<(d0, d1) -> (d0)>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
flow.executable private @torch_jit$async_dispatch_0 {
flow.executable.export public @torch_jit$async_dispatch_0 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @torch_jit$async_dispatch_0(%arg0: !flow.dispatch.tensor<readonly:tensor<128x1024xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<257x128xf32>>, %arg2: !flow.dispatch.tensor<readonly:tensor<257xf32>>, %arg3: !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>) {
%cst = arith.constant 0.000000e+00 : f32
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [128, 1024], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x1024xf32>> -> tensor<128x1024xf32>
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0], sizes = [257, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<257x128xf32>> -> tensor<257x128xf32>
%2 = flow.dispatch.tensor.load %arg2, offsets = [0], sizes = [257], strides = [1] : !flow.dispatch.tensor<readonly:tensor<257xf32>> -> tensor<257xf32>
%3 = tensor.empty() : tensor<257x1024xf32>
%4 = linalg.fill ins(%cst : f32) outs(%3 : tensor<257x1024xf32>) -> tensor<257x1024xf32>
%5 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<128x1024xf32>, tensor<257x128xf32>) outs(%4 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%7 = arith.mulf %in, %in_0 : f32
%8 = arith.addf %out, %7 : f32
linalg.yield %8 : f32
} -> tensor<257x1024xf32>
%6 = linalg.generic {indexing_maps = [#map3, #map4, #map3], iterator_types = ["parallel", "parallel"]} ins(%5, %2 : tensor<257x1024xf32>, tensor<257xf32>) outs(%3 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%7 = arith.addf %in, %in_0 : f32
linalg.yield %7 : f32
} -> tensor<257x1024xf32>
flow.dispatch.tensor.store %6, %arg3, offsets = [0, 0], sizes = [257, 1024], strides = [1, 1] : tensor<257x1024xf32> -> !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>
return
}
}
}
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = flow.tensor.reshape %cst : tensor<257x128x1x1xf32> -> tensor<257x128xf32>
%2 = flow.tensor.reshape %0 : tensor<1x128x4x256xf32> -> tensor<128x1024xf32>
%3 = flow.dispatch @torch_jit$async_dispatch_0::@torch_jit$async_dispatch_0(%2, %1, %cst_0) : (tensor<128x1024xf32>, tensor<257x128xf32>, tensor<257xf32>) -> tensor<257x1024xf32>
%4 = flow.tensor.reshape %3 : tensor<257x1024xf32> -> tensor<1x257x4x256xf32>
%5 = hal.tensor.barrier join(%4 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%6 = hal.tensor.export %5 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
// -----// IR Dump After AnnotateDispatchesPass (iree-flow-annotate-dispatches) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d2, d1)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d2)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
#map3 = affine_map<(d0, d1) -> (d0, d1)>
#map4 = affine_map<(d0, d1) -> (d0)>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
flow.executable private @torch_jit$async_dispatch_0 {
flow.executable.export public @torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<128x1024xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<257x128xf32>>, %arg2: !flow.dispatch.tensor<readonly:tensor<257xf32>>, %arg3: !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>) {
%cst = arith.constant 0.000000e+00 : f32
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [128, 1024], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x1024xf32>> -> tensor<128x1024xf32>
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0], sizes = [257, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<257x128xf32>> -> tensor<257x128xf32>
%2 = flow.dispatch.tensor.load %arg2, offsets = [0], sizes = [257], strides = [1] : !flow.dispatch.tensor<readonly:tensor<257xf32>> -> tensor<257xf32>
%3 = tensor.empty() : tensor<257x1024xf32>
%4 = linalg.fill ins(%cst : f32) outs(%3 : tensor<257x1024xf32>) -> tensor<257x1024xf32>
%5 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<128x1024xf32>, tensor<257x128xf32>) outs(%4 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%7 = arith.mulf %in, %in_0 : f32
%8 = arith.addf %out, %7 : f32
linalg.yield %8 : f32
} -> tensor<257x1024xf32>
%6 = linalg.generic {indexing_maps = [#map3, #map4, #map3], iterator_types = ["parallel", "parallel"]} ins(%5, %2 : tensor<257x1024xf32>, tensor<257xf32>) outs(%3 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%7 = arith.addf %in, %in_0 : f32
linalg.yield %7 : f32
} -> tensor<257x1024xf32>
flow.dispatch.tensor.store %6, %arg3, offsets = [0, 0], sizes = [257, 1024], strides = [1, 1] : tensor<257x1024xf32> -> !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>
return
}
}
}
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = flow.tensor.reshape %cst : tensor<257x128x1x1xf32> -> tensor<257x128xf32>
%2 = flow.tensor.reshape %0 : tensor<1x128x4x256xf32> -> tensor<128x1024xf32>
%3 = flow.dispatch @torch_jit$async_dispatch_0::@torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%2, %1, %cst_0) : (tensor<128x1024xf32>, tensor<257x128xf32>, tensor<257xf32>) -> tensor<257x1024xf32>
%4 = flow.tensor.reshape %3 : tensor<257x1024xf32> -> tensor<1x257x4x256xf32>
%5 = hal.tensor.barrier join(%4 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%6 = hal.tensor.export %5 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
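// AnnotateDispatches renames the export to torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32,
// encoding the recognized op class (matmul-like) and the problem shape M=257, N=1024, K=128 in f32.
// A back-of-the-envelope sketch of the work and traffic implied by that shape (a rough estimate,
// not anything reported by the compiler):
M, N, K = 257, 1024, 128
bytes_per_f32 = 4
matmul_flops = 2 * M * N * K                        # one multiply and one add per MAC
bias_flops = M * N                                  # one add per output element
read_bytes = (K * N + M * K + M) * bytes_per_f32    # input, filter, bias
write_bytes = M * N * bytes_per_f32                 # 257x1024 f32 result
print(matmul_flops, bias_flops, read_bytes, write_bytes)
# ~67.4 MFLOPs for the contraction, ~0.26 MFLOPs for the bias add,
# ~0.66 MB read and ~1.05 MB written.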
// -----// IR Dump After StripDebugOpsPass (iree-util-strip-debug-ops) //----- //
flow.executable private @torch_jit$async_dispatch_0 {
flow.executable.export public @torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<128x1024xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<257x128xf32>>, %arg2: !flow.dispatch.tensor<readonly:tensor<257xf32>>, %arg3: !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>) {
%cst = arith.constant 0.000000e+00 : f32
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [128, 1024], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x1024xf32>> -> tensor<128x1024xf32>
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0], sizes = [257, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<257x128xf32>> -> tensor<257x128xf32>
%2 = flow.dispatch.tensor.load %arg2, offsets = [0], sizes = [257], strides = [1] : !flow.dispatch.tensor<readonly:tensor<257xf32>> -> tensor<257xf32>
%3 = tensor.empty() : tensor<257x1024xf32>
%4 = linalg.fill ins(%cst : f32) outs(%3 : tensor<257x1024xf32>) -> tensor<257x1024xf32>
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<128x1024xf32>, tensor<257x128xf32>) outs(%4 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%7 = arith.mulf %in, %in_0 : f32
%8 = arith.addf %out, %7 : f32
linalg.yield %8 : f32
} -> tensor<257x1024xf32>
%6 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%5, %2 : tensor<257x1024xf32>, tensor<257xf32>) outs(%3 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%7 = arith.addf %in, %in_0 : f32
linalg.yield %7 : f32
} -> tensor<257x1024xf32>
flow.dispatch.tensor.store %6, %arg3, offsets = [0, 0], sizes = [257, 1024], strides = [1, 1] : tensor<257x1024xf32> -> !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>
return
}
}
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = flow.tensor.reshape %cst : tensor<257x128x1x1xf32> -> tensor<257x128xf32>
%2 = flow.tensor.reshape %0 : tensor<1x128x4x256xf32> -> tensor<128x1024xf32>
%3 = flow.dispatch @torch_jit$async_dispatch_0::@torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%2, %1, %cst_0) : (tensor<128x1024xf32>, tensor<257x128xf32>, tensor<257xf32>) -> tensor<257x1024xf32>
%4 = flow.tensor.reshape %3 : tensor<257x1024xf32> -> tensor<1x257x4x256xf32>
%5 = hal.tensor.barrier join(%4 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%6 = hal.tensor.export %5 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
// -----// IR Dump After DeduplicateExecutablesPass (iree-flow-deduplicate-executables) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d2, d1)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d2)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
#map3 = affine_map<(d0, d1) -> (d0, d1)>
#map4 = affine_map<(d0, d1) -> (d0)>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
flow.executable private @torch_jit$async_dispatch_0 {
flow.executable.export public @torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<128x1024xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<257x128xf32>>, %arg2: !flow.dispatch.tensor<readonly:tensor<257xf32>>, %arg3: !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>) {
%cst = arith.constant 0.000000e+00 : f32
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [128, 1024], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x1024xf32>> -> tensor<128x1024xf32>
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0], sizes = [257, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<257x128xf32>> -> tensor<257x128xf32>
%2 = flow.dispatch.tensor.load %arg2, offsets = [0], sizes = [257], strides = [1] : !flow.dispatch.tensor<readonly:tensor<257xf32>> -> tensor<257xf32>
%3 = tensor.empty() : tensor<257x1024xf32>
%4 = linalg.fill ins(%cst : f32) outs(%3 : tensor<257x1024xf32>) -> tensor<257x1024xf32>
%5 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<128x1024xf32>, tensor<257x128xf32>) outs(%4 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%7 = arith.mulf %in, %in_0 : f32
%8 = arith.addf %out, %7 : f32
linalg.yield %8 : f32
} -> tensor<257x1024xf32>
%6 = linalg.generic {indexing_maps = [#map3, #map4, #map3], iterator_types = ["parallel", "parallel"]} ins(%5, %2 : tensor<257x1024xf32>, tensor<257xf32>) outs(%3 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%7 = arith.addf %in, %in_0 : f32
linalg.yield %7 : f32
} -> tensor<257x1024xf32>
flow.dispatch.tensor.store %6, %arg3, offsets = [0, 0], sizes = [257, 1024], strides = [1, 1] : tensor<257x1024xf32> -> !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>
return
}
}
}
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = flow.tensor.reshape %cst : tensor<257x128x1x1xf32> -> tensor<257x128xf32>
%2 = flow.tensor.reshape %0 : tensor<1x128x4x256xf32> -> tensor<128x1024xf32>
%3 = flow.dispatch @torch_jit$async_dispatch_0::@torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%2, %1, %cst_0) : (tensor<128x1024xf32>, tensor<257x128xf32>, tensor<257xf32>) -> tensor<257x1024xf32>
%4 = flow.tensor.reshape %3 : tensor<257x1024xf32> -> tensor<1x257x4x256xf32>
%5 = hal.tensor.barrier join(%4 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%6 = hal.tensor.export %5 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
// -----// IR Dump After InjectTensorTracingPass (iree-flow-inject-tensor-tracing) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After InjectTensorTracingPass (iree-flow-inject-tensor-tracing) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = flow.tensor.reshape %cst : tensor<257x128x1x1xf32> -> tensor<257x128xf32>
%2 = flow.tensor.reshape %0 : tensor<1x128x4x256xf32> -> tensor<128x1024xf32>
%3 = flow.dispatch @torch_jit$async_dispatch_0::@torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%2, %1, %cst_0) : (tensor<128x1024xf32>, tensor<257x128xf32>, tensor<257xf32>) -> tensor<257x1024xf32>
%4 = flow.tensor.reshape %3 : tensor<257x1024xf32> -> tensor<1x257x4x256xf32>
%5 = hal.tensor.barrier join(%4 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%6 = hal.tensor.export %5 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
// -----// IR Dump After CleanupTensorShapesPass (iree-flow-cleanup-tensor-shapes) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After CleanupTensorShapesPass (iree-flow-cleanup-tensor-shapes) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_0 = arith.constant dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = flow.tensor.reshape %cst : tensor<257x128x1x1xf32> -> tensor<257x128xf32>
%2 = flow.tensor.reshape %0 : tensor<1x128x4x256xf32> -> tensor<128x1024xf32>
%3 = flow.dispatch @torch_jit$async_dispatch_0::@torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%2, %1, %cst_0) : (tensor<128x1024xf32>, tensor<257x128xf32>, tensor<257xf32>) -> tensor<257x1024xf32>
%4 = flow.tensor.reshape %3 : tensor<257x1024xf32> -> tensor<1x257x4x256xf32>
%5 = hal.tensor.barrier join(%4 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%6 = hal.tensor.export %5 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
// -----// IR Dump After OutlineConstantsPass (iree-flow-outline-constants) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d2, d1)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d2)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
#map3 = affine_map<(d0, d1) -> (d0, d1)>
#map4 = affine_map<(d0, d1) -> (d0)>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
flow.executable private @torch_jit$async_dispatch_0 {
flow.executable.export public @torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<128x1024xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<257x128xf32>>, %arg2: !flow.dispatch.tensor<readonly:tensor<257xf32>>, %arg3: !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>) {
%cst = arith.constant 0.000000e+00 : f32
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [128, 1024], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x1024xf32>> -> tensor<128x1024xf32>
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0], sizes = [257, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<257x128xf32>> -> tensor<257x128xf32>
%2 = flow.dispatch.tensor.load %arg2, offsets = [0], sizes = [257], strides = [1] : !flow.dispatch.tensor<readonly:tensor<257xf32>> -> tensor<257xf32>
%3 = tensor.empty() : tensor<257x1024xf32>
%4 = linalg.fill ins(%cst : f32) outs(%3 : tensor<257x1024xf32>) -> tensor<257x1024xf32>
%5 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<128x1024xf32>, tensor<257x128xf32>) outs(%4 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%7 = arith.mulf %in, %in_0 : f32
%8 = arith.addf %out, %7 : f32
linalg.yield %8 : f32
} -> tensor<257x1024xf32>
%6 = linalg.generic {indexing_maps = [#map3, #map4, #map3], iterator_types = ["parallel", "parallel"]} ins(%5, %2 : tensor<257x1024xf32>, tensor<257xf32>) outs(%3 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%7 = arith.addf %in, %in_0 : f32
linalg.yield %7 : f32
} -> tensor<257x1024xf32>
flow.dispatch.tensor.store %6, %arg3, offsets = [0, 0], sizes = [257, 1024], strides = [1, 1] : tensor<257x1024xf32> -> !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>
return
}
}
}
util.global private @__constant_tensor_257x128x1x1xf32 {inlining_policy = #util.inline.never, stream.affinity.default = #hal.device.affinity<@__device_0>} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
util.global private @__constant_tensor_257xf32 {inlining_policy = #util.inline.never, stream.affinity.default = #hal.device.affinity<@__device_0>} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%__constant_tensor_257x128x1x1xf32 = util.global.load immutable @__constant_tensor_257x128x1x1xf32 : tensor<257x128x1x1xf32>
%__constant_tensor_257xf32 = util.global.load immutable @__constant_tensor_257xf32 : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = flow.tensor.reshape %__constant_tensor_257x128x1x1xf32 : tensor<257x128x1x1xf32> -> tensor<257x128xf32>
%2 = flow.tensor.reshape %0 : tensor<1x128x4x256xf32> -> tensor<128x1024xf32>
%3 = flow.dispatch @torch_jit$async_dispatch_0::@torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%2, %1, %__constant_tensor_257xf32) : (tensor<128x1024xf32>, tensor<257x128xf32>, tensor<257xf32>) -> tensor<257x1024xf32>
%4 = flow.tensor.reshape %3 : tensor<257x1024xf32> -> tensor<1x257x4x256xf32>
%5 = hal.tensor.barrier join(%4 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%6 = hal.tensor.export %5 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
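// Note: the dispatch above lowers the original 1x1 convolution to a matmul plus bias add,
// out[oc, hw] = sum_ic w[oc, ic] * x[ic, hw] + b[oc], with the NCHW input flattened to
// 128x1024 and the result reshaped back to 1x257x4x256. A minimal NumPy sketch of the
// equivalent computation, assuming the elided dialect resources hold the conv weight
// (257x128x1x1) and bias (257); the variable names below are illustrative only:
//   import numpy as np
//   x = np.random.rand(1, 128, 4, 256).astype(np.float32)  # network input
//   w = np.random.rand(257, 128, 1, 1).astype(np.float32)  # conv weight (elided resource)
//   b = np.random.rand(257).astype(np.float32)             # conv bias (elided resource)
//   y = w.reshape(257, 128) @ x.reshape(128, 1024) + b[:, None]  # matmul_like_257x1024x128
//   out = y.reshape(1, 257, 4, 256)                        # final flow.tensor.reshape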
// -----// IR Dump After OptimizeIntArithmeticPass (iree-util-optimize-int-arithmetic) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%__constant_tensor_257x128x1x1xf32 = util.global.load immutable @__constant_tensor_257x128x1x1xf32 : tensor<257x128x1x1xf32>
%__constant_tensor_257xf32 = util.global.load immutable @__constant_tensor_257xf32 : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = flow.tensor.reshape %__constant_tensor_257x128x1x1xf32 : tensor<257x128x1x1xf32> -> tensor<257x128xf32>
%2 = flow.tensor.reshape %0 : tensor<1x128x4x256xf32> -> tensor<128x1024xf32>
%3 = flow.dispatch @torch_jit$async_dispatch_0::@torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%2, %1, %__constant_tensor_257xf32) : (tensor<128x1024xf32>, tensor<257x128xf32>, tensor<257xf32>) -> tensor<257x1024xf32>
%4 = flow.tensor.reshape %3 : tensor<257x1024xf32> -> tensor<1x257x4x256xf32>
%5 = hal.tensor.barrier join(%4 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%6 = hal.tensor.export %5 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
// -----// IR Dump After OptimizeIntArithmeticPass (iree-util-optimize-int-arithmetic) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%__constant_tensor_257x128x1x1xf32 = util.global.load immutable @__constant_tensor_257x128x1x1xf32 : tensor<257x128x1x1xf32>
%__constant_tensor_257xf32 = util.global.load immutable @__constant_tensor_257xf32 : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = flow.tensor.reshape %__constant_tensor_257x128x1x1xf32 : tensor<257x128x1x1xf32> -> tensor<257x128xf32>
%2 = flow.tensor.reshape %0 : tensor<1x128x4x256xf32> -> tensor<128x1024xf32>
%3 = flow.dispatch @torch_jit$async_dispatch_0::@torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%2, %1, %__constant_tensor_257xf32) : (tensor<128x1024xf32>, tensor<257x128xf32>, tensor<257xf32>) -> tensor<257x1024xf32>
%4 = flow.tensor.reshape %3 : tensor<257x1024xf32> -> tensor<1x257x4x256xf32>
%5 = hal.tensor.barrier join(%4 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%6 = hal.tensor.export %5 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%__constant_tensor_257x128x1x1xf32 = util.global.load immutable @__constant_tensor_257x128x1x1xf32 : tensor<257x128x1x1xf32>
%__constant_tensor_257xf32 = util.global.load immutable @__constant_tensor_257xf32 : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = flow.tensor.reshape %__constant_tensor_257x128x1x1xf32 : tensor<257x128x1x1xf32> -> tensor<257x128xf32>
%2 = flow.tensor.reshape %0 : tensor<1x128x4x256xf32> -> tensor<128x1024xf32>
%3 = flow.dispatch @torch_jit$async_dispatch_0::@torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%2, %1, %__constant_tensor_257xf32) : (tensor<128x1024xf32>, tensor<257x128xf32>, tensor<257xf32>) -> tensor<257x1024xf32>
%4 = flow.tensor.reshape %3 : tensor<257x1024xf32> -> tensor<1x257x4x256xf32>
%5 = hal.tensor.barrier join(%4 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%6 = hal.tensor.export %5 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After SimplifyGlobalAccessesPass (iree-util-simplify-global-accesses) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%__constant_tensor_257x128x1x1xf32 = util.global.load immutable @__constant_tensor_257x128x1x1xf32 : tensor<257x128x1x1xf32>
%__constant_tensor_257xf32 = util.global.load immutable @__constant_tensor_257xf32 : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = flow.tensor.reshape %__constant_tensor_257x128x1x1xf32 : tensor<257x128x1x1xf32> -> tensor<257x128xf32>
%2 = flow.tensor.reshape %0 : tensor<1x128x4x256xf32> -> tensor<128x1024xf32>
%3 = flow.dispatch @torch_jit$async_dispatch_0::@torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%2, %1, %__constant_tensor_257xf32) : (tensor<128x1024xf32>, tensor<257x128xf32>, tensor<257xf32>) -> tensor<257x1024xf32>
%4 = flow.tensor.reshape %3 : tensor<257x1024xf32> -> tensor<1x257x4x256xf32>
%5 = hal.tensor.barrier join(%4 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%6 = hal.tensor.export %5 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
// -----// IR Dump After SimplifyGlobalAccessesPass (iree-util-simplify-global-accesses) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatternsPass (iree-util-apply-patterns) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%__constant_tensor_257x128x1x1xf32 = util.global.load immutable @__constant_tensor_257x128x1x1xf32 : tensor<257x128x1x1xf32>
%__constant_tensor_257xf32 = util.global.load immutable @__constant_tensor_257xf32 : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = flow.tensor.reshape %__constant_tensor_257x128x1x1xf32 : tensor<257x128x1x1xf32> -> tensor<257x128xf32>
%2 = flow.tensor.reshape %0 : tensor<1x128x4x256xf32> -> tensor<128x1024xf32>
%3 = flow.dispatch @torch_jit$async_dispatch_0::@torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%2, %1, %__constant_tensor_257xf32) : (tensor<128x1024xf32>, tensor<257x128xf32>, tensor<257xf32>) -> tensor<257x1024xf32>
%4 = flow.tensor.reshape %3 : tensor<257x1024xf32> -> tensor<1x257x4x256xf32>
%5 = hal.tensor.barrier join(%4 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%6 = hal.tensor.export %5 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatternsPass (iree-util-apply-patterns) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After FoldGlobalsPass (iree-util-fold-globals) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d2, d1)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d2)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
#map3 = affine_map<(d0, d1) -> (d0, d1)>
#map4 = affine_map<(d0, d1) -> (d0)>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
flow.executable private @torch_jit$async_dispatch_0 {
flow.executable.export public @torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<128x1024xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<257x128xf32>>, %arg2: !flow.dispatch.tensor<readonly:tensor<257xf32>>, %arg3: !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>) {
%cst = arith.constant 0.000000e+00 : f32
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [128, 1024], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x1024xf32>> -> tensor<128x1024xf32>
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0], sizes = [257, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<257x128xf32>> -> tensor<257x128xf32>
%2 = flow.dispatch.tensor.load %arg2, offsets = [0], sizes = [257], strides = [1] : !flow.dispatch.tensor<readonly:tensor<257xf32>> -> tensor<257xf32>
%3 = tensor.empty() : tensor<257x1024xf32>
%4 = linalg.fill ins(%cst : f32) outs(%3 : tensor<257x1024xf32>) -> tensor<257x1024xf32>
%5 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<128x1024xf32>, tensor<257x128xf32>) outs(%4 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%7 = arith.mulf %in, %in_0 : f32
%8 = arith.addf %out, %7 : f32
linalg.yield %8 : f32
} -> tensor<257x1024xf32>
%6 = linalg.generic {indexing_maps = [#map3, #map4, #map3], iterator_types = ["parallel", "parallel"]} ins(%5, %2 : tensor<257x1024xf32>, tensor<257xf32>) outs(%3 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%7 = arith.addf %in, %in_0 : f32
linalg.yield %7 : f32
} -> tensor<257x1024xf32>
flow.dispatch.tensor.store %6, %arg3, offsets = [0, 0], sizes = [257, 1024], strides = [1, 1] : tensor<257x1024xf32> -> !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>
return
}
}
}
util.global private @__constant_tensor_257x128x1x1xf32 {inlining_policy = #util.inline.never, stream.affinity.default = #hal.device.affinity<@__device_0>} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
util.global private @__constant_tensor_257xf32 {inlining_policy = #util.inline.never, stream.affinity.default = #hal.device.affinity<@__device_0>} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%__constant_tensor_257x128x1x1xf32 = util.global.load immutable @__constant_tensor_257x128x1x1xf32 : tensor<257x128x1x1xf32>
%__constant_tensor_257xf32 = util.global.load immutable @__constant_tensor_257xf32 : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = flow.tensor.reshape %__constant_tensor_257x128x1x1xf32 : tensor<257x128x1x1xf32> -> tensor<257x128xf32>
%2 = flow.tensor.reshape %0 : tensor<1x128x4x256xf32> -> tensor<128x1024xf32>
%3 = flow.dispatch @torch_jit$async_dispatch_0::@torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%2, %1, %__constant_tensor_257xf32) : (tensor<128x1024xf32>, tensor<257x128xf32>, tensor<257xf32>) -> tensor<257x1024xf32>
%4 = flow.tensor.reshape %3 : tensor<257x1024xf32> -> tensor<1x257x4x256xf32>
%5 = hal.tensor.barrier join(%4 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%6 = hal.tensor.export %5 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
// -----// IR Dump After FuseGlobalsPass (iree-util-fuse-globals) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d2, d1)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d2)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
#map3 = affine_map<(d0, d1) -> (d0, d1)>
#map4 = affine_map<(d0, d1) -> (d0)>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
flow.executable private @torch_jit$async_dispatch_0 {
flow.executable.export public @torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<128x1024xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<257x128xf32>>, %arg2: !flow.dispatch.tensor<readonly:tensor<257xf32>>, %arg3: !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>) {
%cst = arith.constant 0.000000e+00 : f32
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [128, 1024], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x1024xf32>> -> tensor<128x1024xf32>
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0], sizes = [257, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<257x128xf32>> -> tensor<257x128xf32>
%2 = flow.dispatch.tensor.load %arg2, offsets = [0], sizes = [257], strides = [1] : !flow.dispatch.tensor<readonly:tensor<257xf32>> -> tensor<257xf32>
%3 = tensor.empty() : tensor<257x1024xf32>
%4 = linalg.fill ins(%cst : f32) outs(%3 : tensor<257x1024xf32>) -> tensor<257x1024xf32>
%5 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<128x1024xf32>, tensor<257x128xf32>) outs(%4 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%7 = arith.mulf %in, %in_0 : f32
%8 = arith.addf %out, %7 : f32
linalg.yield %8 : f32
} -> tensor<257x1024xf32>
%6 = linalg.generic {indexing_maps = [#map3, #map4, #map3], iterator_types = ["parallel", "parallel"]} ins(%5, %2 : tensor<257x1024xf32>, tensor<257xf32>) outs(%3 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%7 = arith.addf %in, %in_0 : f32
linalg.yield %7 : f32
} -> tensor<257x1024xf32>
flow.dispatch.tensor.store %6, %arg3, offsets = [0, 0], sizes = [257, 1024], strides = [1, 1] : tensor<257x1024xf32> -> !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>
return
}
}
}
util.global private @__constant_tensor_257x128x1x1xf32 {inlining_policy = #util.inline.never, stream.affinity.default = #hal.device.affinity<@__device_0>} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
util.global private @__constant_tensor_257xf32 {inlining_policy = #util.inline.never, stream.affinity.default = #hal.device.affinity<@__device_0>} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%__constant_tensor_257x128x1x1xf32 = util.global.load immutable @__constant_tensor_257x128x1x1xf32 : tensor<257x128x1x1xf32>
%__constant_tensor_257xf32 = util.global.load immutable @__constant_tensor_257xf32 : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = flow.tensor.reshape %__constant_tensor_257x128x1x1xf32 : tensor<257x128x1x1xf32> -> tensor<257x128xf32>
%2 = flow.tensor.reshape %0 : tensor<1x128x4x256xf32> -> tensor<128x1024xf32>
%3 = flow.dispatch @torch_jit$async_dispatch_0::@torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%2, %1, %__constant_tensor_257xf32) : (tensor<128x1024xf32>, tensor<257x128xf32>, tensor<257xf32>) -> tensor<257x1024xf32>
%4 = flow.tensor.reshape %3 : tensor<257x1024xf32> -> tensor<1x257x4x256xf32>
%5 = hal.tensor.barrier join(%4 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%6 = hal.tensor.export %5 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
// -----// IR Dump After IPOPass (iree-util-ipo) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d2, d1)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d2)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
#map3 = affine_map<(d0, d1) -> (d0, d1)>
#map4 = affine_map<(d0, d1) -> (d0)>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
flow.executable private @torch_jit$async_dispatch_0 {
flow.executable.export public @torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<128x1024xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<257x128xf32>>, %arg2: !flow.dispatch.tensor<readonly:tensor<257xf32>>, %arg3: !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>) {
%cst = arith.constant 0.000000e+00 : f32
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [128, 1024], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x1024xf32>> -> tensor<128x1024xf32>
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0], sizes = [257, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<257x128xf32>> -> tensor<257x128xf32>
%2 = flow.dispatch.tensor.load %arg2, offsets = [0], sizes = [257], strides = [1] : !flow.dispatch.tensor<readonly:tensor<257xf32>> -> tensor<257xf32>
%3 = tensor.empty() : tensor<257x1024xf32>
%4 = linalg.fill ins(%cst : f32) outs(%3 : tensor<257x1024xf32>) -> tensor<257x1024xf32>
%5 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<128x1024xf32>, tensor<257x128xf32>) outs(%4 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%7 = arith.mulf %in, %in_0 : f32
%8 = arith.addf %out, %7 : f32
linalg.yield %8 : f32
} -> tensor<257x1024xf32>
%6 = linalg.generic {indexing_maps = [#map3, #map4, #map3], iterator_types = ["parallel", "parallel"]} ins(%5, %2 : tensor<257x1024xf32>, tensor<257xf32>) outs(%3 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%7 = arith.addf %in, %in_0 : f32
linalg.yield %7 : f32
} -> tensor<257x1024xf32>
flow.dispatch.tensor.store %6, %arg3, offsets = [0, 0], sizes = [257, 1024], strides = [1, 1] : tensor<257x1024xf32> -> !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>
return
}
}
}
util.global private @__constant_tensor_257x128x1x1xf32 {inlining_policy = #util.inline.never, stream.affinity.default = #hal.device.affinity<@__device_0>} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
util.global private @__constant_tensor_257xf32 {inlining_policy = #util.inline.never, stream.affinity.default = #hal.device.affinity<@__device_0>} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%__constant_tensor_257x128x1x1xf32 = util.global.load immutable @__constant_tensor_257x128x1x1xf32 : tensor<257x128x1x1xf32>
%__constant_tensor_257xf32 = util.global.load immutable @__constant_tensor_257xf32 : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = flow.tensor.reshape %__constant_tensor_257x128x1x1xf32 : tensor<257x128x1x1xf32> -> tensor<257x128xf32>
%2 = flow.tensor.reshape %0 : tensor<1x128x4x256xf32> -> tensor<128x1024xf32>
%3 = flow.dispatch @torch_jit$async_dispatch_0::@torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%2, %1, %__constant_tensor_257xf32) : (tensor<128x1024xf32>, tensor<257x128xf32>, tensor<257xf32>) -> tensor<257x1024xf32>
%4 = flow.tensor.reshape %3 : tensor<257x1024xf32> -> tensor<1x257x4x256xf32>
%5 = hal.tensor.barrier join(%4 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%6 = hal.tensor.export %5 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
// -----// IR Dump After FixedPointIteratorPass (iree-util-fixed-point-iterator) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d2, d1)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d2)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
#map3 = affine_map<(d0, d1) -> (d0, d1)>
#map4 = affine_map<(d0, d1) -> (d0)>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
flow.executable private @torch_jit$async_dispatch_0 {
flow.executable.export public @torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<128x1024xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<257x128xf32>>, %arg2: !flow.dispatch.tensor<readonly:tensor<257xf32>>, %arg3: !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>) {
%cst = arith.constant 0.000000e+00 : f32
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [128, 1024], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x1024xf32>> -> tensor<128x1024xf32>
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0], sizes = [257, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<257x128xf32>> -> tensor<257x128xf32>
%2 = flow.dispatch.tensor.load %arg2, offsets = [0], sizes = [257], strides = [1] : !flow.dispatch.tensor<readonly:tensor<257xf32>> -> tensor<257xf32>
%3 = tensor.empty() : tensor<257x1024xf32>
%4 = linalg.fill ins(%cst : f32) outs(%3 : tensor<257x1024xf32>) -> tensor<257x1024xf32>
%5 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<128x1024xf32>, tensor<257x128xf32>) outs(%4 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%7 = arith.mulf %in, %in_0 : f32
%8 = arith.addf %out, %7 : f32
linalg.yield %8 : f32
} -> tensor<257x1024xf32>
%6 = linalg.generic {indexing_maps = [#map3, #map4, #map3], iterator_types = ["parallel", "parallel"]} ins(%5, %2 : tensor<257x1024xf32>, tensor<257xf32>) outs(%3 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%7 = arith.addf %in, %in_0 : f32
linalg.yield %7 : f32
} -> tensor<257x1024xf32>
flow.dispatch.tensor.store %6, %arg3, offsets = [0, 0], sizes = [257, 1024], strides = [1, 1] : tensor<257x1024xf32> -> !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>
return
}
}
}
util.global private @__constant_tensor_257x128x1x1xf32 {inlining_policy = #util.inline.never, stream.affinity.default = #hal.device.affinity<@__device_0>} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
util.global private @__constant_tensor_257xf32 {inlining_policy = #util.inline.never, stream.affinity.default = #hal.device.affinity<@__device_0>} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%__constant_tensor_257x128x1x1xf32 = util.global.load immutable @__constant_tensor_257x128x1x1xf32 : tensor<257x128x1x1xf32>
%__constant_tensor_257xf32 = util.global.load immutable @__constant_tensor_257xf32 : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = flow.tensor.reshape %__constant_tensor_257x128x1x1xf32 : tensor<257x128x1x1xf32> -> tensor<257x128xf32>
%2 = flow.tensor.reshape %0 : tensor<1x128x4x256xf32> -> tensor<128x1024xf32>
%3 = flow.dispatch @torch_jit$async_dispatch_0::@torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%2, %1, %__constant_tensor_257xf32) : (tensor<128x1024xf32>, tensor<257x128xf32>, tensor<257xf32>) -> tensor<257x1024xf32>
%4 = flow.tensor.reshape %3 : tensor<257x1024xf32> -> tensor<1x257x4x256xf32>
%5 = hal.tensor.barrier join(%4 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%6 = hal.tensor.export %5 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
// -----// IR Dump After SymbolDCE (symbol-dce) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d2, d1)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d2)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
#map3 = affine_map<(d0, d1) -> (d0, d1)>
#map4 = affine_map<(d0, d1) -> (d0)>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
flow.executable private @torch_jit$async_dispatch_0 {
flow.executable.export public @torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<128x1024xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<257x128xf32>>, %arg2: !flow.dispatch.tensor<readonly:tensor<257xf32>>, %arg3: !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>) {
%cst = arith.constant 0.000000e+00 : f32
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [128, 1024], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x1024xf32>> -> tensor<128x1024xf32>
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0], sizes = [257, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<257x128xf32>> -> tensor<257x128xf32>
%2 = flow.dispatch.tensor.load %arg2, offsets = [0], sizes = [257], strides = [1] : !flow.dispatch.tensor<readonly:tensor<257xf32>> -> tensor<257xf32>
%3 = tensor.empty() : tensor<257x1024xf32>
%4 = linalg.fill ins(%cst : f32) outs(%3 : tensor<257x1024xf32>) -> tensor<257x1024xf32>
%5 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<128x1024xf32>, tensor<257x128xf32>) outs(%4 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%7 = arith.mulf %in, %in_0 : f32
%8 = arith.addf %out, %7 : f32
linalg.yield %8 : f32
} -> tensor<257x1024xf32>
%6 = linalg.generic {indexing_maps = [#map3, #map4, #map3], iterator_types = ["parallel", "parallel"]} ins(%5, %2 : tensor<257x1024xf32>, tensor<257xf32>) outs(%3 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%7 = arith.addf %in, %in_0 : f32
linalg.yield %7 : f32
} -> tensor<257x1024xf32>
flow.dispatch.tensor.store %6, %arg3, offsets = [0, 0], sizes = [257, 1024], strides = [1, 1] : tensor<257x1024xf32> -> !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>
return
}
}
}
util.global private @__constant_tensor_257x128x1x1xf32 {inlining_policy = #util.inline.never, stream.affinity.default = #hal.device.affinity<@__device_0>} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
util.global private @__constant_tensor_257xf32 {inlining_policy = #util.inline.never, stream.affinity.default = #hal.device.affinity<@__device_0>} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%__constant_tensor_257x128x1x1xf32 = util.global.load immutable @__constant_tensor_257x128x1x1xf32 : tensor<257x128x1x1xf32>
%__constant_tensor_257xf32 = util.global.load immutable @__constant_tensor_257xf32 : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = flow.tensor.reshape %__constant_tensor_257x128x1x1xf32 : tensor<257x128x1x1xf32> -> tensor<257x128xf32>
%2 = flow.tensor.reshape %0 : tensor<1x128x4x256xf32> -> tensor<128x1024xf32>
%3 = flow.dispatch @torch_jit$async_dispatch_0::@torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%2, %1, %__constant_tensor_257xf32) : (tensor<128x1024xf32>, tensor<257x128xf32>, tensor<257xf32>) -> tensor<257x1024xf32>
%4 = flow.tensor.reshape %3 : tensor<257x1024xf32> -> tensor<1x257x4x256xf32>
%5 = hal.tensor.barrier join(%4 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%6 = hal.tensor.export %5 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
// -----// IR Dump After VerifyInputPass (iree-stream-verify-input) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d2, d1)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d2)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
#map3 = affine_map<(d0, d1) -> (d0, d1)>
#map4 = affine_map<(d0, d1) -> (d0)>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
flow.executable private @torch_jit$async_dispatch_0 {
flow.executable.export public @torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<128x1024xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<257x128xf32>>, %arg2: !flow.dispatch.tensor<readonly:tensor<257xf32>>, %arg3: !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>) {
%cst = arith.constant 0.000000e+00 : f32
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [128, 1024], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x1024xf32>> -> tensor<128x1024xf32>
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0], sizes = [257, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<257x128xf32>> -> tensor<257x128xf32>
%2 = flow.dispatch.tensor.load %arg2, offsets = [0], sizes = [257], strides = [1] : !flow.dispatch.tensor<readonly:tensor<257xf32>> -> tensor<257xf32>
%3 = tensor.empty() : tensor<257x1024xf32>
%4 = linalg.fill ins(%cst : f32) outs(%3 : tensor<257x1024xf32>) -> tensor<257x1024xf32>
%5 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<128x1024xf32>, tensor<257x128xf32>) outs(%4 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%7 = arith.mulf %in, %in_0 : f32
%8 = arith.addf %out, %7 : f32
linalg.yield %8 : f32
} -> tensor<257x1024xf32>
%6 = linalg.generic {indexing_maps = [#map3, #map4, #map3], iterator_types = ["parallel", "parallel"]} ins(%5, %2 : tensor<257x1024xf32>, tensor<257xf32>) outs(%3 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%7 = arith.addf %in, %in_0 : f32
linalg.yield %7 : f32
} -> tensor<257x1024xf32>
flow.dispatch.tensor.store %6, %arg3, offsets = [0, 0], sizes = [257, 1024], strides = [1, 1] : tensor<257x1024xf32> -> !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>
return
}
}
}
util.global private @__constant_tensor_257x128x1x1xf32 {inlining_policy = #util.inline.never, stream.affinity.default = #hal.device.affinity<@__device_0>} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
util.global private @__constant_tensor_257xf32 {inlining_policy = #util.inline.never, stream.affinity.default = #hal.device.affinity<@__device_0>} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%__constant_tensor_257x128x1x1xf32 = util.global.load immutable @__constant_tensor_257x128x1x1xf32 : tensor<257x128x1x1xf32>
%__constant_tensor_257xf32 = util.global.load immutable @__constant_tensor_257xf32 : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = flow.tensor.reshape %__constant_tensor_257x128x1x1xf32 : tensor<257x128x1x1xf32> -> tensor<257x128xf32>
%2 = flow.tensor.reshape %0 : tensor<1x128x4x256xf32> -> tensor<128x1024xf32>
%3 = flow.dispatch @torch_jit$async_dispatch_0::@torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%2, %1, %__constant_tensor_257xf32) : (tensor<128x1024xf32>, tensor<257x128xf32>, tensor<257xf32>) -> tensor<257x1024xf32>
%4 = flow.tensor.reshape %3 : tensor<257x1024xf32> -> tensor<1x257x4x256xf32>
%5 = hal.tensor.barrier join(%4 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%6 = hal.tensor.export %5 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%__constant_tensor_257x128x1x1xf32 = util.global.load immutable @__constant_tensor_257x128x1x1xf32 : tensor<257x128x1x1xf32>
%__constant_tensor_257xf32 = util.global.load immutable @__constant_tensor_257xf32 : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = flow.tensor.reshape %__constant_tensor_257x128x1x1xf32 : tensor<257x128x1x1xf32> -> tensor<257x128xf32>
%2 = flow.tensor.reshape %0 : tensor<1x128x4x256xf32> -> tensor<128x1024xf32>
%3 = flow.dispatch @torch_jit$async_dispatch_0::@torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%2, %1, %__constant_tensor_257xf32) : (tensor<128x1024xf32>, tensor<257x128xf32>, tensor<257xf32>) -> tensor<257x1024xf32>
%4 = flow.tensor.reshape %3 : tensor<257x1024xf32> -> tensor<1x257x4x256xf32>
%5 = hal.tensor.barrier join(%4 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%6 = hal.tensor.export %5 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%__constant_tensor_257x128x1x1xf32 = util.global.load immutable @__constant_tensor_257x128x1x1xf32 : tensor<257x128x1x1xf32>
%__constant_tensor_257xf32 = util.global.load immutable @__constant_tensor_257xf32 : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = flow.tensor.reshape %__constant_tensor_257x128x1x1xf32 : tensor<257x128x1x1xf32> -> tensor<257x128xf32>
%2 = flow.tensor.reshape %0 : tensor<1x128x4x256xf32> -> tensor<128x1024xf32>
%3 = flow.dispatch @torch_jit$async_dispatch_0::@torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%2, %1, %__constant_tensor_257xf32) : (tensor<128x1024xf32>, tensor<257x128xf32>, tensor<257xf32>) -> tensor<257x1024xf32>
%4 = flow.tensor.reshape %3 : tensor<257x1024xf32> -> tensor<1x257x4x256xf32>
%5 = hal.tensor.barrier join(%4 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%6 = hal.tensor.export %5 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After OptimizeIntArithmeticPass (iree-util-optimize-int-arithmetic) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%__constant_tensor_257x128x1x1xf32 = util.global.load immutable @__constant_tensor_257x128x1x1xf32 : tensor<257x128x1x1xf32>
%__constant_tensor_257xf32 = util.global.load immutable @__constant_tensor_257xf32 : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = flow.tensor.reshape %__constant_tensor_257x128x1x1xf32 : tensor<257x128x1x1xf32> -> tensor<257x128xf32>
%2 = flow.tensor.reshape %0 : tensor<1x128x4x256xf32> -> tensor<128x1024xf32>
%3 = flow.dispatch @torch_jit$async_dispatch_0::@torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%2, %1, %__constant_tensor_257xf32) : (tensor<128x1024xf32>, tensor<257x128xf32>, tensor<257xf32>) -> tensor<257x1024xf32>
%4 = flow.tensor.reshape %3 : tensor<257x1024xf32> -> tensor<1x257x4x256xf32>
%5 = hal.tensor.barrier join(%4 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%6 = hal.tensor.export %5 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
// -----// IR Dump After OptimizeIntArithmeticPass (iree-util-optimize-int-arithmetic) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After SimplifyGlobalAccessesPass (iree-util-simplify-global-accesses) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%__constant_tensor_257x128x1x1xf32 = util.global.load immutable @__constant_tensor_257x128x1x1xf32 : tensor<257x128x1x1xf32>
%__constant_tensor_257xf32 = util.global.load immutable @__constant_tensor_257xf32 : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = flow.tensor.reshape %__constant_tensor_257x128x1x1xf32 : tensor<257x128x1x1xf32> -> tensor<257x128xf32>
%2 = flow.tensor.reshape %0 : tensor<1x128x4x256xf32> -> tensor<128x1024xf32>
%3 = flow.dispatch @torch_jit$async_dispatch_0::@torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%2, %1, %__constant_tensor_257xf32) : (tensor<128x1024xf32>, tensor<257x128xf32>, tensor<257xf32>) -> tensor<257x1024xf32>
%4 = flow.tensor.reshape %3 : tensor<257x1024xf32> -> tensor<1x257x4x256xf32>
%5 = hal.tensor.barrier join(%4 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%6 = hal.tensor.export %5 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
// -----// IR Dump After SimplifyGlobalAccessesPass (iree-util-simplify-global-accesses) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatternsPass (iree-util-apply-patterns) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%__constant_tensor_257x128x1x1xf32 = util.global.load immutable @__constant_tensor_257x128x1x1xf32 : tensor<257x128x1x1xf32>
%__constant_tensor_257xf32 = util.global.load immutable @__constant_tensor_257xf32 : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = flow.tensor.reshape %__constant_tensor_257x128x1x1xf32 : tensor<257x128x1x1xf32> -> tensor<257x128xf32>
%2 = flow.tensor.reshape %0 : tensor<1x128x4x256xf32> -> tensor<128x1024xf32>
%3 = flow.dispatch @torch_jit$async_dispatch_0::@torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%2, %1, %__constant_tensor_257xf32) : (tensor<128x1024xf32>, tensor<257x128xf32>, tensor<257xf32>) -> tensor<257x1024xf32>
%4 = flow.tensor.reshape %3 : tensor<257x1024xf32> -> tensor<1x257x4x256xf32>
%5 = hal.tensor.barrier join(%4 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%6 = hal.tensor.export %5 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatternsPass (iree-util-apply-patterns) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After FoldGlobalsPass (iree-util-fold-globals) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d2, d1)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d2)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
#map3 = affine_map<(d0, d1) -> (d0, d1)>
#map4 = affine_map<(d0, d1) -> (d0)>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
flow.executable private @torch_jit$async_dispatch_0 {
flow.executable.export public @torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<128x1024xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<257x128xf32>>, %arg2: !flow.dispatch.tensor<readonly:tensor<257xf32>>, %arg3: !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>) {
%cst = arith.constant 0.000000e+00 : f32
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [128, 1024], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x1024xf32>> -> tensor<128x1024xf32>
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0], sizes = [257, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<257x128xf32>> -> tensor<257x128xf32>
%2 = flow.dispatch.tensor.load %arg2, offsets = [0], sizes = [257], strides = [1] : !flow.dispatch.tensor<readonly:tensor<257xf32>> -> tensor<257xf32>
%3 = tensor.empty() : tensor<257x1024xf32>
%4 = linalg.fill ins(%cst : f32) outs(%3 : tensor<257x1024xf32>) -> tensor<257x1024xf32>
%5 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<128x1024xf32>, tensor<257x128xf32>) outs(%4 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%7 = arith.mulf %in, %in_0 : f32
%8 = arith.addf %out, %7 : f32
linalg.yield %8 : f32
} -> tensor<257x1024xf32>
%6 = linalg.generic {indexing_maps = [#map3, #map4, #map3], iterator_types = ["parallel", "parallel"]} ins(%5, %2 : tensor<257x1024xf32>, tensor<257xf32>) outs(%3 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%7 = arith.addf %in, %in_0 : f32
linalg.yield %7 : f32
} -> tensor<257x1024xf32>
flow.dispatch.tensor.store %6, %arg3, offsets = [0, 0], sizes = [257, 1024], strides = [1, 1] : tensor<257x1024xf32> -> !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>
return
}
}
}
util.global private @__constant_tensor_257x128x1x1xf32 {inlining_policy = #util.inline.never, stream.affinity.default = #hal.device.affinity<@__device_0>} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
util.global private @__constant_tensor_257xf32 {inlining_policy = #util.inline.never, stream.affinity.default = #hal.device.affinity<@__device_0>} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%__constant_tensor_257x128x1x1xf32 = util.global.load immutable @__constant_tensor_257x128x1x1xf32 : tensor<257x128x1x1xf32>
%__constant_tensor_257xf32 = util.global.load immutable @__constant_tensor_257xf32 : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = flow.tensor.reshape %__constant_tensor_257x128x1x1xf32 : tensor<257x128x1x1xf32> -> tensor<257x128xf32>
%2 = flow.tensor.reshape %0 : tensor<1x128x4x256xf32> -> tensor<128x1024xf32>
%3 = flow.dispatch @torch_jit$async_dispatch_0::@torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%2, %1, %__constant_tensor_257xf32) : (tensor<128x1024xf32>, tensor<257x128xf32>, tensor<257xf32>) -> tensor<257x1024xf32>
%4 = flow.tensor.reshape %3 : tensor<257x1024xf32> -> tensor<1x257x4x256xf32>
%5 = hal.tensor.barrier join(%4 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%6 = hal.tensor.export %5 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
// -----// IR Dump After FuseGlobalsPass (iree-util-fuse-globals) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d2, d1)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d2)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
#map3 = affine_map<(d0, d1) -> (d0, d1)>
#map4 = affine_map<(d0, d1) -> (d0)>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
flow.executable private @torch_jit$async_dispatch_0 {
flow.executable.export public @torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<128x1024xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<257x128xf32>>, %arg2: !flow.dispatch.tensor<readonly:tensor<257xf32>>, %arg3: !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>) {
%cst = arith.constant 0.000000e+00 : f32
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [128, 1024], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x1024xf32>> -> tensor<128x1024xf32>
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0], sizes = [257, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<257x128xf32>> -> tensor<257x128xf32>
%2 = flow.dispatch.tensor.load %arg2, offsets = [0], sizes = [257], strides = [1] : !flow.dispatch.tensor<readonly:tensor<257xf32>> -> tensor<257xf32>
%3 = tensor.empty() : tensor<257x1024xf32>
%4 = linalg.fill ins(%cst : f32) outs(%3 : tensor<257x1024xf32>) -> tensor<257x1024xf32>
%5 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<128x1024xf32>, tensor<257x128xf32>) outs(%4 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%7 = arith.mulf %in, %in_0 : f32
%8 = arith.addf %out, %7 : f32
linalg.yield %8 : f32
} -> tensor<257x1024xf32>
%6 = linalg.generic {indexing_maps = [#map3, #map4, #map3], iterator_types = ["parallel", "parallel"]} ins(%5, %2 : tensor<257x1024xf32>, tensor<257xf32>) outs(%3 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%7 = arith.addf %in, %in_0 : f32
linalg.yield %7 : f32
} -> tensor<257x1024xf32>
flow.dispatch.tensor.store %6, %arg3, offsets = [0, 0], sizes = [257, 1024], strides = [1, 1] : tensor<257x1024xf32> -> !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>
return
}
}
}
util.global private @__constant_tensor_257x128x1x1xf32 {inlining_policy = #util.inline.never, stream.affinity.default = #hal.device.affinity<@__device_0>} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
util.global private @__constant_tensor_257xf32 {inlining_policy = #util.inline.never, stream.affinity.default = #hal.device.affinity<@__device_0>} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%__constant_tensor_257x128x1x1xf32 = util.global.load immutable @__constant_tensor_257x128x1x1xf32 : tensor<257x128x1x1xf32>
%__constant_tensor_257xf32 = util.global.load immutable @__constant_tensor_257xf32 : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = flow.tensor.reshape %__constant_tensor_257x128x1x1xf32 : tensor<257x128x1x1xf32> -> tensor<257x128xf32>
%2 = flow.tensor.reshape %0 : tensor<1x128x4x256xf32> -> tensor<128x1024xf32>
%3 = flow.dispatch @torch_jit$async_dispatch_0::@torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%2, %1, %__constant_tensor_257xf32) : (tensor<128x1024xf32>, tensor<257x128xf32>, tensor<257xf32>) -> tensor<257x1024xf32>
%4 = flow.tensor.reshape %3 : tensor<257x1024xf32> -> tensor<1x257x4x256xf32>
%5 = hal.tensor.barrier join(%4 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%6 = hal.tensor.export %5 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
// -----// IR Dump After IPOPass (iree-util-ipo) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d2, d1)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d2)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
#map3 = affine_map<(d0, d1) -> (d0, d1)>
#map4 = affine_map<(d0, d1) -> (d0)>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
flow.executable private @torch_jit$async_dispatch_0 {
flow.executable.export public @torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<128x1024xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<257x128xf32>>, %arg2: !flow.dispatch.tensor<readonly:tensor<257xf32>>, %arg3: !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>) {
%cst = arith.constant 0.000000e+00 : f32
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [128, 1024], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x1024xf32>> -> tensor<128x1024xf32>
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0], sizes = [257, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<257x128xf32>> -> tensor<257x128xf32>
%2 = flow.dispatch.tensor.load %arg2, offsets = [0], sizes = [257], strides = [1] : !flow.dispatch.tensor<readonly:tensor<257xf32>> -> tensor<257xf32>
%3 = tensor.empty() : tensor<257x1024xf32>
%4 = linalg.fill ins(%cst : f32) outs(%3 : tensor<257x1024xf32>) -> tensor<257x1024xf32>
%5 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<128x1024xf32>, tensor<257x128xf32>) outs(%4 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%7 = arith.mulf %in, %in_0 : f32
%8 = arith.addf %out, %7 : f32
linalg.yield %8 : f32
} -> tensor<257x1024xf32>
%6 = linalg.generic {indexing_maps = [#map3, #map4, #map3], iterator_types = ["parallel", "parallel"]} ins(%5, %2 : tensor<257x1024xf32>, tensor<257xf32>) outs(%3 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%7 = arith.addf %in, %in_0 : f32
linalg.yield %7 : f32
} -> tensor<257x1024xf32>
flow.dispatch.tensor.store %6, %arg3, offsets = [0, 0], sizes = [257, 1024], strides = [1, 1] : tensor<257x1024xf32> -> !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>
return
}
}
}
util.global private @__constant_tensor_257x128x1x1xf32 {inlining_policy = #util.inline.never, stream.affinity.default = #hal.device.affinity<@__device_0>} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
util.global private @__constant_tensor_257xf32 {inlining_policy = #util.inline.never, stream.affinity.default = #hal.device.affinity<@__device_0>} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%__constant_tensor_257x128x1x1xf32 = util.global.load immutable @__constant_tensor_257x128x1x1xf32 : tensor<257x128x1x1xf32>
%__constant_tensor_257xf32 = util.global.load immutable @__constant_tensor_257xf32 : tensor<257xf32>
%0 = hal.tensor.import wait(%arg1) => %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32>
%1 = flow.tensor.reshape %__constant_tensor_257x128x1x1xf32 : tensor<257x128x1x1xf32> -> tensor<257x128xf32>
%2 = flow.tensor.reshape %0 : tensor<1x128x4x256xf32> -> tensor<128x1024xf32>
%3 = flow.dispatch @torch_jit$async_dispatch_0::@torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%2, %1, %__constant_tensor_257xf32) : (tensor<128x1024xf32>, tensor<257x128xf32>, tensor<257xf32>) -> tensor<257x1024xf32>
%4 = flow.tensor.reshape %3 : tensor<257x1024xf32> -> tensor<1x257x4x256xf32>
%5 = hal.tensor.barrier join(%4 : tensor<1x257x4x256xf32>) => %arg2 : !hal.fence
%6 = hal.tensor.export %5 : tensor<1x257x4x256xf32> -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
// -----// IR Dump After ConvertToStreamPass (iree-stream-conversion) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d2, d1)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d2)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
#map3 = affine_map<(d0, d1) -> (d0, d1)>
#map4 = affine_map<(d0, d1) -> (d0)>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @torch_jit$async_dispatch_0 {
stream.executable.export public @torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x1024xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<257x128xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<257xf32>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>
%cst = arith.constant 0.000000e+00 : f32
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 1024], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x1024xf32>> -> tensor<128x1024xf32>
%5 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [257, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<257x128xf32>> -> tensor<257x128xf32>
%6 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [257], strides = [1] : !flow.dispatch.tensor<readonly:tensor<257xf32>> -> tensor<257xf32>
%7 = tensor.empty() : tensor<257x1024xf32>
%8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<257x1024xf32>) -> tensor<257x1024xf32>
%9 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%4, %5 : tensor<128x1024xf32>, tensor<257x128xf32>) outs(%8 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.mulf %in, %in_0 : f32
%12 = arith.addf %out, %11 : f32
linalg.yield %12 : f32
} -> tensor<257x1024xf32>
%10 = linalg.generic {indexing_maps = [#map3, #map4, #map3], iterator_types = ["parallel", "parallel"]} ins(%9, %6 : tensor<257x1024xf32>, tensor<257xf32>) outs(%7 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
linalg.yield %11 : f32
} -> tensor<257x1024xf32>
flow.dispatch.tensor.store %10, %3, offsets = [0, 0], sizes = [257, 1024], strides = [1, 1] : tensor<257x1024xf32> -> !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>
return
}
}
}
util.global private @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
util.global private @__constant_tensor_257x128x1x1xf32__size : index
util.initializer {
%cst = stream.tensor.constant on(#hal.device.affinity<@__device_0>) : tensor<257x128x1x1xf32> in !stream.resource<constant> = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%0 = stream.resource.size %cst : !stream.resource<constant>
util.global.store %cst, @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
util.global.store %0, @__constant_tensor_257x128x1x1xf32__size : index
util.return
}
util.global private @__constant_tensor_257xf32 : !stream.resource<constant>
util.global private @__constant_tensor_257xf32__size : index
util.initializer {
%cst = stream.tensor.constant on(#hal.device.affinity<@__device_0>) : tensor<257xf32> in !stream.resource<constant> = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = stream.resource.size %cst : !stream.resource<constant>
util.global.store %cst, @__constant_tensor_257xf32 : !stream.resource<constant>
util.global.store %0, @__constant_tensor_257xf32__size : index
util.return
}
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%__constant_tensor_257x128x1x1xf32 = util.global.load @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
%__constant_tensor_257x128x1x1xf32__size = util.global.load @__constant_tensor_257x128x1x1xf32__size : index
%0 = stream.async.transfer %__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>{%__constant_tensor_257x128x1x1xf32__size} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%__constant_tensor_257x128x1x1xf32__size}
%__constant_tensor_257xf32 = util.global.load @__constant_tensor_257xf32 : !stream.resource<constant>
%__constant_tensor_257xf32__size = util.global.load @__constant_tensor_257xf32__size : index
%1 = stream.async.transfer %__constant_tensor_257xf32 : !stream.resource<constant>{%__constant_tensor_257xf32__size} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%__constant_tensor_257xf32__size}
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
%c1 = arith.constant 1 : index
%c128 = arith.constant 128 : index
%c4 = arith.constant 4 : index
%c256 = arith.constant 256 : index
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("tensor") shape([%c1, %c128, %c4, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x128x4x256xf32> : index
%3 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32> in !stream.resource<external>{%2}
%4 = stream.timepoint.import on(#hal.device.affinity<@__device_0>) %arg1 : (!hal.fence) => !stream.timepoint
%5 = stream.timepoint.await %4 => %3 : !stream.resource<external>{%2}
%6 = stream.async.transfer %5 : !stream.resource<external>{%2} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%2}
%7 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<257x128xf32> : index
%8 = stream.tensor.clone on(#hal.device.affinity<@__device_0>) %0 : tensor<257x128x1x1xf32> in !stream.resource<*>{%__constant_tensor_257x128x1x1xf32__size} -> tensor<257x128xf32> in !stream.resource<*>{%7}
%9 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<128x1024xf32> : index
%10 = stream.tensor.clone on(#hal.device.affinity<@__device_0>) %6 : tensor<1x128x4x256xf32> in !stream.resource<*>{%2} -> tensor<128x1024xf32> in !stream.resource<*>{%9}
%c0 = arith.constant 0 : index
%11 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<257x1024xf32> : index
%12 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @torch_jit$async_dispatch_0::@torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%10[%c0 to %9 for %9], %8[%c0 to %7 for %7], %1[%c0 to %__constant_tensor_257xf32__size for %__constant_tensor_257xf32__size]) : (!stream.resource<*>{%9}, !stream.resource<*>{%7}, !stream.resource<*>{%__constant_tensor_257xf32__size}) -> !stream.resource<*>{%11}
%13 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x257x4x256xf32> : index
%14 = stream.tensor.clone on(#hal.device.affinity<@__device_0>) %12 : tensor<257x1024xf32> in !stream.resource<*>{%11} -> tensor<1x257x4x256xf32> in !stream.resource<*>{%13}
%result, %result_timepoint = stream.timepoint.barrier on(#hal.device.affinity<@__device_0>) %14 : !stream.resource<*>{%13} => !stream.timepoint
stream.timepoint.chain_external on(#hal.device.affinity<@__device_0>) %result_timepoint => (%arg2 : !hal.fence)
%15 = stream.async.transfer %result : !stream.resource<*>{%13} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%13}
%16 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %15 : tensor<1x257x4x256xf32> in !stream.resource<external>{%13} -> !hal.buffer_view
util.return %16 : !hal.buffer_view
}
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
// -----// IR Dump After VerifyLoweringToTensorsPass (iree-stream-verify-lowering-to-tensors) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d2, d1)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d2)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
#map3 = affine_map<(d0, d1) -> (d0, d1)>
#map4 = affine_map<(d0, d1) -> (d0)>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @torch_jit$async_dispatch_0 {
stream.executable.export public @torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x1024xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<257x128xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<257xf32>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>
%cst = arith.constant 0.000000e+00 : f32
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 1024], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x1024xf32>> -> tensor<128x1024xf32>
%5 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [257, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<257x128xf32>> -> tensor<257x128xf32>
%6 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [257], strides = [1] : !flow.dispatch.tensor<readonly:tensor<257xf32>> -> tensor<257xf32>
%7 = tensor.empty() : tensor<257x1024xf32>
%8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<257x1024xf32>) -> tensor<257x1024xf32>
%9 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%4, %5 : tensor<128x1024xf32>, tensor<257x128xf32>) outs(%8 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.mulf %in, %in_0 : f32
%12 = arith.addf %out, %11 : f32
linalg.yield %12 : f32
} -> tensor<257x1024xf32>
%10 = linalg.generic {indexing_maps = [#map3, #map4, #map3], iterator_types = ["parallel", "parallel"]} ins(%9, %6 : tensor<257x1024xf32>, tensor<257xf32>) outs(%7 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
linalg.yield %11 : f32
} -> tensor<257x1024xf32>
flow.dispatch.tensor.store %10, %3, offsets = [0, 0], sizes = [257, 1024], strides = [1, 1] : tensor<257x1024xf32> -> !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>
return
}
}
}
util.global private @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
util.global private @__constant_tensor_257x128x1x1xf32__size : index
util.initializer {
%cst = stream.tensor.constant on(#hal.device.affinity<@__device_0>) : tensor<257x128x1x1xf32> in !stream.resource<constant> = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%0 = stream.resource.size %cst : !stream.resource<constant>
util.global.store %cst, @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
util.global.store %0, @__constant_tensor_257x128x1x1xf32__size : index
util.return
}
util.global private @__constant_tensor_257xf32 : !stream.resource<constant>
util.global private @__constant_tensor_257xf32__size : index
util.initializer {
%cst = stream.tensor.constant on(#hal.device.affinity<@__device_0>) : tensor<257xf32> in !stream.resource<constant> = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = stream.resource.size %cst : !stream.resource<constant>
util.global.store %cst, @__constant_tensor_257xf32 : !stream.resource<constant>
util.global.store %0, @__constant_tensor_257xf32__size : index
util.return
}
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%__constant_tensor_257x128x1x1xf32 = util.global.load @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
%__constant_tensor_257x128x1x1xf32__size = util.global.load @__constant_tensor_257x128x1x1xf32__size : index
%0 = stream.async.transfer %__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>{%__constant_tensor_257x128x1x1xf32__size} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%__constant_tensor_257x128x1x1xf32__size}
%__constant_tensor_257xf32 = util.global.load @__constant_tensor_257xf32 : !stream.resource<constant>
%__constant_tensor_257xf32__size = util.global.load @__constant_tensor_257xf32__size : index
%1 = stream.async.transfer %__constant_tensor_257xf32 : !stream.resource<constant>{%__constant_tensor_257xf32__size} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%__constant_tensor_257xf32__size}
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
%c1 = arith.constant 1 : index
%c128 = arith.constant 128 : index
%c4 = arith.constant 4 : index
%c256 = arith.constant 256 : index
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("tensor") shape([%c1, %c128, %c4, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x128x4x256xf32> : index
%3 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32> in !stream.resource<external>{%2}
%4 = stream.timepoint.import on(#hal.device.affinity<@__device_0>) %arg1 : (!hal.fence) => !stream.timepoint
%5 = stream.timepoint.await %4 => %3 : !stream.resource<external>{%2}
%6 = stream.async.transfer %5 : !stream.resource<external>{%2} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%2}
%7 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<257x128xf32> : index
%8 = stream.tensor.clone on(#hal.device.affinity<@__device_0>) %0 : tensor<257x128x1x1xf32> in !stream.resource<*>{%__constant_tensor_257x128x1x1xf32__size} -> tensor<257x128xf32> in !stream.resource<*>{%7}
%9 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<128x1024xf32> : index
%10 = stream.tensor.clone on(#hal.device.affinity<@__device_0>) %6 : tensor<1x128x4x256xf32> in !stream.resource<*>{%2} -> tensor<128x1024xf32> in !stream.resource<*>{%9}
%c0 = arith.constant 0 : index
%11 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<257x1024xf32> : index
%12 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @torch_jit$async_dispatch_0::@torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%10[%c0 to %9 for %9], %8[%c0 to %7 for %7], %1[%c0 to %__constant_tensor_257xf32__size for %__constant_tensor_257xf32__size]) : (!stream.resource<*>{%9}, !stream.resource<*>{%7}, !stream.resource<*>{%__constant_tensor_257xf32__size}) -> !stream.resource<*>{%11}
%13 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x257x4x256xf32> : index
%14 = stream.tensor.clone on(#hal.device.affinity<@__device_0>) %12 : tensor<257x1024xf32> in !stream.resource<*>{%11} -> tensor<1x257x4x256xf32> in !stream.resource<*>{%13}
%result, %result_timepoint = stream.timepoint.barrier on(#hal.device.affinity<@__device_0>) %14 : !stream.resource<*>{%13} => !stream.timepoint
stream.timepoint.chain_external on(#hal.device.affinity<@__device_0>) %result_timepoint => (%arg2 : !hal.fence)
%15 = stream.async.transfer %result : !stream.resource<*>{%13} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%13}
%16 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %15 : tensor<1x257x4x256xf32> in !stream.resource<external>{%13} -> !hal.buffer_view
util.return %16 : !hal.buffer_view
}
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
func.func @torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x1024xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<257x128xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<257xf32>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 1024], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x1024xf32>> -> tensor<128x1024xf32>
%5 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [257, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<257x128xf32>> -> tensor<257x128xf32>
%6 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [257], strides = [1] : !flow.dispatch.tensor<readonly:tensor<257xf32>> -> tensor<257xf32>
%7 = tensor.empty() : tensor<257x1024xf32>
%8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<257x1024xf32>) -> tensor<257x1024xf32>
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%4, %5 : tensor<128x1024xf32>, tensor<257x128xf32>) outs(%8 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.mulf %in, %in_0 : f32
%12 = arith.addf %out, %11 : f32
linalg.yield %12 : f32
} -> tensor<257x1024xf32>
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%9, %6 : tensor<257x1024xf32>, tensor<257xf32>) outs(%7 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
linalg.yield %11 : f32
} -> tensor<257x1024xf32>
flow.dispatch.tensor.store %10, %3, offsets = [0, 0], sizes = [257, 1024], strides = [1, 1] : tensor<257x1024xf32> -> !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>
return
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.initializer {
%cst = stream.tensor.constant on(#hal.device.affinity<@__device_0>) : tensor<257x128x1x1xf32> in !stream.resource<constant> = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%0 = stream.resource.size %cst : !stream.resource<constant>
util.global.store %cst, @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
util.global.store %0, @__constant_tensor_257x128x1x1xf32__size : index
util.return
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.initializer {
%cst = stream.tensor.constant on(#hal.device.affinity<@__device_0>) : tensor<257xf32> in !stream.resource<constant> = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = stream.resource.size %cst : !stream.resource<constant>
util.global.store %cst, @__constant_tensor_257xf32 : !stream.resource<constant>
util.global.store %0, @__constant_tensor_257xf32__size : index
util.return
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c4 = arith.constant 4 : index
%c128 = arith.constant 128 : index
%c1 = arith.constant 1 : index
%__constant_tensor_257x128x1x1xf32 = util.global.load @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
%__constant_tensor_257x128x1x1xf32__size = util.global.load @__constant_tensor_257x128x1x1xf32__size : index
%0 = stream.async.transfer %__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>{%__constant_tensor_257x128x1x1xf32__size} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%__constant_tensor_257x128x1x1xf32__size}
%__constant_tensor_257xf32 = util.global.load @__constant_tensor_257xf32 : !stream.resource<constant>
%__constant_tensor_257xf32__size = util.global.load @__constant_tensor_257xf32__size : index
%1 = stream.async.transfer %__constant_tensor_257xf32 : !stream.resource<constant>{%__constant_tensor_257xf32__size} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%__constant_tensor_257xf32__size}
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("tensor") shape([%c1, %c128, %c4, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x128x4x256xf32> : index
%3 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32> in !stream.resource<external>{%2}
%4 = stream.timepoint.import on(#hal.device.affinity<@__device_0>) %arg1 : (!hal.fence) => !stream.timepoint
%5 = stream.timepoint.await %4 => %3 : !stream.resource<external>{%2}
%6 = stream.async.transfer %5 : !stream.resource<external>{%2} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%2}
%7 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<257x128xf32> : index
%8 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<128x1024xf32> : index
%9 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<257x1024xf32> : index
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @torch_jit$async_dispatch_0::@torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%6[%c0 to %8 for %8], %0[%c0 to %7 for %7], %1[%c0 to %__constant_tensor_257xf32__size for %__constant_tensor_257xf32__size]) : (!stream.resource<*>{%8}, !stream.resource<*>{%7}, !stream.resource<*>{%__constant_tensor_257xf32__size}) -> !stream.resource<*>{%9}
%11 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x257x4x256xf32> : index
%result, %result_timepoint = stream.timepoint.barrier on(#hal.device.affinity<@__device_0>) %10 : !stream.resource<*>{%11} => !stream.timepoint
stream.timepoint.chain_external on(#hal.device.affinity<@__device_0>) %result_timepoint => (%arg2 : !hal.fence)
%12 = stream.async.transfer %result : !stream.resource<*>{%11} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%11}
%13 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %12 : tensor<1x257x4x256xf32> in !stream.resource<external>{%11} -> !hal.buffer_view
util.return %13 : !hal.buffer_view
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After Inliner (inline) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d2, d1)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d2)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
#map3 = affine_map<(d0, d1) -> (d0, d1)>
#map4 = affine_map<(d0, d1) -> (d0)>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @torch_jit$async_dispatch_0 {
stream.executable.export public @torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x1024xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<257x128xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<257xf32>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 1024], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x1024xf32>> -> tensor<128x1024xf32>
%5 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [257, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<257x128xf32>> -> tensor<257x128xf32>
%6 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [257], strides = [1] : !flow.dispatch.tensor<readonly:tensor<257xf32>> -> tensor<257xf32>
%7 = tensor.empty() : tensor<257x1024xf32>
%8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<257x1024xf32>) -> tensor<257x1024xf32>
%9 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%4, %5 : tensor<128x1024xf32>, tensor<257x128xf32>) outs(%8 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.mulf %in, %in_0 : f32
%12 = arith.addf %out, %11 : f32
linalg.yield %12 : f32
} -> tensor<257x1024xf32>
%10 = linalg.generic {indexing_maps = [#map3, #map4, #map3], iterator_types = ["parallel", "parallel"]} ins(%9, %6 : tensor<257x1024xf32>, tensor<257xf32>) outs(%7 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
linalg.yield %11 : f32
} -> tensor<257x1024xf32>
flow.dispatch.tensor.store %10, %3, offsets = [0, 0], sizes = [257, 1024], strides = [1, 1] : tensor<257x1024xf32> -> !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>
return
}
}
}
util.global private @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
util.global private @__constant_tensor_257x128x1x1xf32__size : index
util.initializer {
%cst = stream.tensor.constant on(#hal.device.affinity<@__device_0>) : tensor<257x128x1x1xf32> in !stream.resource<constant> = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%0 = stream.resource.size %cst : !stream.resource<constant>
util.global.store %cst, @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
util.global.store %0, @__constant_tensor_257x128x1x1xf32__size : index
util.return
}
util.global private @__constant_tensor_257xf32 : !stream.resource<constant>
util.global private @__constant_tensor_257xf32__size : index
util.initializer {
%cst = stream.tensor.constant on(#hal.device.affinity<@__device_0>) : tensor<257xf32> in !stream.resource<constant> = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = stream.resource.size %cst : !stream.resource<constant>
util.global.store %cst, @__constant_tensor_257xf32 : !stream.resource<constant>
util.global.store %0, @__constant_tensor_257xf32__size : index
util.return
}
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c4 = arith.constant 4 : index
%c128 = arith.constant 128 : index
%c1 = arith.constant 1 : index
%__constant_tensor_257x128x1x1xf32 = util.global.load @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
%__constant_tensor_257x128x1x1xf32__size = util.global.load @__constant_tensor_257x128x1x1xf32__size : index
%0 = stream.async.transfer %__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>{%__constant_tensor_257x128x1x1xf32__size} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%__constant_tensor_257x128x1x1xf32__size}
%__constant_tensor_257xf32 = util.global.load @__constant_tensor_257xf32 : !stream.resource<constant>
%__constant_tensor_257xf32__size = util.global.load @__constant_tensor_257xf32__size : index
%1 = stream.async.transfer %__constant_tensor_257xf32 : !stream.resource<constant>{%__constant_tensor_257xf32__size} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%__constant_tensor_257xf32__size}
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("tensor") shape([%c1, %c128, %c4, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x128x4x256xf32> : index
%3 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32> in !stream.resource<external>{%2}
%4 = stream.timepoint.import on(#hal.device.affinity<@__device_0>) %arg1 : (!hal.fence) => !stream.timepoint
%5 = stream.timepoint.await %4 => %3 : !stream.resource<external>{%2}
%6 = stream.async.transfer %5 : !stream.resource<external>{%2} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%2}
%7 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<257x128xf32> : index
%8 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<128x1024xf32> : index
%9 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<257x1024xf32> : index
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @torch_jit$async_dispatch_0::@torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%6[%c0 to %8 for %8], %0[%c0 to %7 for %7], %1[%c0 to %__constant_tensor_257xf32__size for %__constant_tensor_257xf32__size]) : (!stream.resource<*>{%8}, !stream.resource<*>{%7}, !stream.resource<*>{%__constant_tensor_257xf32__size}) -> !stream.resource<*>{%9}
%11 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x257x4x256xf32> : index
%result, %result_timepoint = stream.timepoint.barrier on(#hal.device.affinity<@__device_0>) %10 : !stream.resource<*>{%11} => !stream.timepoint
stream.timepoint.chain_external on(#hal.device.affinity<@__device_0>) %result_timepoint => (%arg2 : !hal.fence)
%12 = stream.async.transfer %result : !stream.resource<*>{%11} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%11}
%13 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %12 : tensor<1x257x4x256xf32> in !stream.resource<external>{%11} -> !hal.buffer_view
util.return %13 : !hal.buffer_view
}
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
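// Note on the dispatch above (reader's sketch, not compiler output): @torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32
// consumes the 257x128x1x1 weight constant and the 1x128x4x256 input as a plain 257x128 times 128x1024 matmul
// (128 x (4*256) = 128x1024 columns) followed by a broadcasted bias add. Assuming the value names from the dump,
// the first linalg.generic is roughly equivalent to
//   %9 = linalg.matmul ins(%5, %4 : tensor<257x128xf32>, tensor<128x1024xf32>)
//                      outs(%8 : tensor<257x1024xf32>) -> tensor<257x1024xf32>
// and the second linalg.generic adds the tensor<257xf32> bias to every column of that 257x1024 result.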
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.initializer {
%cst = stream.tensor.constant on(#hal.device.affinity<@__device_0>) : tensor<257x128x1x1xf32> in !stream.resource<constant> = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%0 = stream.resource.size %cst : !stream.resource<constant>
util.global.store %cst, @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
util.global.store %0, @__constant_tensor_257x128x1x1xf32__size : index
util.return
}
// -----// IR Dump After CSE (cse) //----- //
util.initializer {
%cst = stream.tensor.constant on(#hal.device.affinity<@__device_0>) : tensor<257x128x1x1xf32> in !stream.resource<constant> = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%0 = stream.resource.size %cst : !stream.resource<constant>
util.global.store %cst, @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
util.global.store %0, @__constant_tensor_257x128x1x1xf32__size : index
util.return
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
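// Note on the wrapper above (reader's sketch): @torch_jit is the synchronous entry point. It passes util.null as the
// wait fence, creates a signal fence on device 0, calls @torch_jit$async, and then blocks on hal.fence.await with
// timeout_millis(-1), which appears to request an unbounded wait before returning the result buffer view.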
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.initializer {
%cst = stream.tensor.constant on(#hal.device.affinity<@__device_0>) : tensor<257xf32> in !stream.resource<constant> = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = stream.resource.size %cst : !stream.resource<constant>
util.global.store %cst, @__constant_tensor_257xf32 : !stream.resource<constant>
util.global.store %0, @__constant_tensor_257xf32__size : index
util.return
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c4 = arith.constant 4 : index
%c128 = arith.constant 128 : index
%c1 = arith.constant 1 : index
%__constant_tensor_257x128x1x1xf32 = util.global.load @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
%__constant_tensor_257x128x1x1xf32__size = util.global.load @__constant_tensor_257x128x1x1xf32__size : index
%0 = stream.async.transfer %__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>{%__constant_tensor_257x128x1x1xf32__size} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%__constant_tensor_257x128x1x1xf32__size}
%__constant_tensor_257xf32 = util.global.load @__constant_tensor_257xf32 : !stream.resource<constant>
%__constant_tensor_257xf32__size = util.global.load @__constant_tensor_257xf32__size : index
%1 = stream.async.transfer %__constant_tensor_257xf32 : !stream.resource<constant>{%__constant_tensor_257xf32__size} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%__constant_tensor_257xf32__size}
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("tensor") shape([%c1, %c128, %c4, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x128x4x256xf32> : index
%3 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32> in !stream.resource<external>{%2}
%4 = stream.timepoint.import on(#hal.device.affinity<@__device_0>) %arg1 : (!hal.fence) => !stream.timepoint
%5 = stream.timepoint.await %4 => %3 : !stream.resource<external>{%2}
%6 = stream.async.transfer %5 : !stream.resource<external>{%2} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%2}
%7 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<257x128xf32> : index
%8 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<128x1024xf32> : index
%9 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<257x1024xf32> : index
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @torch_jit$async_dispatch_0::@torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%6[%c0 to %8 for %8], %0[%c0 to %7 for %7], %1[%c0 to %__constant_tensor_257xf32__size for %__constant_tensor_257xf32__size]) : (!stream.resource<*>{%8}, !stream.resource<*>{%7}, !stream.resource<*>{%__constant_tensor_257xf32__size}) -> !stream.resource<*>{%9}
%11 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x257x4x256xf32> : index
%result, %result_timepoint = stream.timepoint.barrier on(#hal.device.affinity<@__device_0>) %10 : !stream.resource<*>{%11} => !stream.timepoint
stream.timepoint.chain_external on(#hal.device.affinity<@__device_0>) %result_timepoint => (%arg2 : !hal.fence)
%12 = stream.async.transfer %result : !stream.resource<*>{%11} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%11}
%13 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %12 : tensor<1x257x4x256xf32> in !stream.resource<external>{%11} -> !hal.buffer_view
util.return %13 : !hal.buffer_view
}
// -----// IR Dump After OptimizeIntArithmeticPass (iree-util-optimize-int-arithmetic) //----- //
util.initializer {
%cst = stream.tensor.constant on(#hal.device.affinity<@__device_0>) : tensor<257x128x1x1xf32> in !stream.resource<constant> = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%0 = stream.resource.size %cst : !stream.resource<constant>
util.global.store %cst, @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
util.global.store %0, @__constant_tensor_257x128x1x1xf32__size : index
util.return
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.initializer {
%cst = stream.tensor.constant on(#hal.device.affinity<@__device_0>) : tensor<257xf32> in !stream.resource<constant> = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = stream.resource.size %cst : !stream.resource<constant>
util.global.store %cst, @__constant_tensor_257xf32 : !stream.resource<constant>
util.global.store %0, @__constant_tensor_257xf32__size : index
util.return
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c4 = arith.constant 4 : index
%c128 = arith.constant 128 : index
%c1 = arith.constant 1 : index
%__constant_tensor_257x128x1x1xf32 = util.global.load @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
%__constant_tensor_257x128x1x1xf32__size = util.global.load @__constant_tensor_257x128x1x1xf32__size : index
%0 = stream.async.transfer %__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>{%__constant_tensor_257x128x1x1xf32__size} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%__constant_tensor_257x128x1x1xf32__size}
%__constant_tensor_257xf32 = util.global.load @__constant_tensor_257xf32 : !stream.resource<constant>
%__constant_tensor_257xf32__size = util.global.load @__constant_tensor_257xf32__size : index
%1 = stream.async.transfer %__constant_tensor_257xf32 : !stream.resource<constant>{%__constant_tensor_257xf32__size} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%__constant_tensor_257xf32__size}
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("tensor") shape([%c1, %c128, %c4, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x128x4x256xf32> : index
%3 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32> in !stream.resource<external>{%2}
%4 = stream.timepoint.import on(#hal.device.affinity<@__device_0>) %arg1 : (!hal.fence) => !stream.timepoint
%5 = stream.timepoint.await %4 => %3 : !stream.resource<external>{%2}
%6 = stream.async.transfer %5 : !stream.resource<external>{%2} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%2}
%7 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<257x128xf32> : index
%8 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<128x1024xf32> : index
%9 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<257x1024xf32> : index
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @torch_jit$async_dispatch_0::@torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%6[%c0 to %8 for %8], %0[%c0 to %7 for %7], %1[%c0 to %__constant_tensor_257xf32__size for %__constant_tensor_257xf32__size]) : (!stream.resource<*>{%8}, !stream.resource<*>{%7}, !stream.resource<*>{%__constant_tensor_257xf32__size}) -> !stream.resource<*>{%9}
%11 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x257x4x256xf32> : index
%result, %result_timepoint = stream.timepoint.barrier on(#hal.device.affinity<@__device_0>) %10 : !stream.resource<*>{%11} => !stream.timepoint
stream.timepoint.chain_external on(#hal.device.affinity<@__device_0>) %result_timepoint => (%arg2 : !hal.fence)
%12 = stream.async.transfer %result : !stream.resource<*>{%11} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%11}
%13 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %12 : tensor<1x257x4x256xf32> in !stream.resource<external>{%11} -> !hal.buffer_view
util.return %13 : !hal.buffer_view
}
// -----// IR Dump After SimplifyGlobalAccessesPass (iree-util-simplify-global-accesses) //----- //
util.initializer {
%cst = stream.tensor.constant on(#hal.device.affinity<@__device_0>) : tensor<257x128x1x1xf32> in !stream.resource<constant> = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%0 = stream.resource.size %cst : !stream.resource<constant>
util.global.store %cst, @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
util.global.store %0, @__constant_tensor_257x128x1x1xf32__size : index
util.return
}
// -----// IR Dump After OptimizeIntArithmeticPass (iree-util-optimize-int-arithmetic) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After OptimizeIntArithmeticPass (iree-util-optimize-int-arithmetic) //----- //
util.initializer {
%cst = stream.tensor.constant on(#hal.device.affinity<@__device_0>) : tensor<257xf32> in !stream.resource<constant> = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = stream.resource.size %cst : !stream.resource<constant>
util.global.store %cst, @__constant_tensor_257xf32 : !stream.resource<constant>
util.global.store %0, @__constant_tensor_257xf32__size : index
util.return
}
// -----// IR Dump After OptimizeIntArithmeticPass (iree-util-optimize-int-arithmetic) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c4 = arith.constant 4 : index
%c128 = arith.constant 128 : index
%c1 = arith.constant 1 : index
%__constant_tensor_257x128x1x1xf32 = util.global.load @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
%__constant_tensor_257x128x1x1xf32__size = util.global.load @__constant_tensor_257x128x1x1xf32__size : index
%0 = stream.async.transfer %__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>{%__constant_tensor_257x128x1x1xf32__size} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%__constant_tensor_257x128x1x1xf32__size}
%__constant_tensor_257xf32 = util.global.load @__constant_tensor_257xf32 : !stream.resource<constant>
%__constant_tensor_257xf32__size = util.global.load @__constant_tensor_257xf32__size : index
%1 = stream.async.transfer %__constant_tensor_257xf32 : !stream.resource<constant>{%__constant_tensor_257xf32__size} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%__constant_tensor_257xf32__size}
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("tensor") shape([%c1, %c128, %c4, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x128x4x256xf32> : index
%3 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32> in !stream.resource<external>{%2}
%4 = stream.timepoint.import on(#hal.device.affinity<@__device_0>) %arg1 : (!hal.fence) => !stream.timepoint
%5 = stream.timepoint.await %4 => %3 : !stream.resource<external>{%2}
%6 = stream.async.transfer %5 : !stream.resource<external>{%2} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%2}
%7 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<257x128xf32> : index
%8 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<128x1024xf32> : index
%9 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<257x1024xf32> : index
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @torch_jit$async_dispatch_0::@torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%6[%c0 to %8 for %8], %0[%c0 to %7 for %7], %1[%c0 to %__constant_tensor_257xf32__size for %__constant_tensor_257xf32__size]) : (!stream.resource<*>{%8}, !stream.resource<*>{%7}, !stream.resource<*>{%__constant_tensor_257xf32__size}) -> !stream.resource<*>{%9}
%11 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x257x4x256xf32> : index
%result, %result_timepoint = stream.timepoint.barrier on(#hal.device.affinity<@__device_0>) %10 : !stream.resource<*>{%11} => !stream.timepoint
stream.timepoint.chain_external on(#hal.device.affinity<@__device_0>) %result_timepoint => (%arg2 : !hal.fence)
%12 = stream.async.transfer %result : !stream.resource<*>{%11} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%11}
%13 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %12 : tensor<1x257x4x256xf32> in !stream.resource<external>{%11} -> !hal.buffer_view
util.return %13 : !hal.buffer_view
}
// -----// IR Dump After SimplifyGlobalAccessesPass (iree-util-simplify-global-accesses) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After SimplifyGlobalAccessesPass (iree-util-simplify-global-accesses) //----- //
util.initializer {
%cst = stream.tensor.constant on(#hal.device.affinity<@__device_0>) : tensor<257xf32> in !stream.resource<constant> = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = stream.resource.size %cst : !stream.resource<constant>
util.global.store %cst, @__constant_tensor_257xf32 : !stream.resource<constant>
util.global.store %0, @__constant_tensor_257xf32__size : index
util.return
}
// -----// IR Dump After ApplyPatternsPass (iree-util-apply-patterns) //----- //
util.initializer {
%cst = stream.tensor.constant on(#hal.device.affinity<@__device_0>) : tensor<257x128x1x1xf32> in !stream.resource<constant> = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%0 = stream.resource.size %cst : !stream.resource<constant>
util.global.store %cst, @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
util.global.store %0, @__constant_tensor_257x128x1x1xf32__size : index
util.return
}
// -----// IR Dump After SimplifyGlobalAccessesPass (iree-util-simplify-global-accesses) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%__constant_tensor_257x128x1x1xf32 = util.global.load @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
%__constant_tensor_257x128x1x1xf32__size = util.global.load @__constant_tensor_257x128x1x1xf32__size : index
%__constant_tensor_257xf32 = util.global.load @__constant_tensor_257xf32 : !stream.resource<constant>
%__constant_tensor_257xf32__size = util.global.load @__constant_tensor_257xf32__size : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c4 = arith.constant 4 : index
%c128 = arith.constant 128 : index
%c1 = arith.constant 1 : index
%0 = stream.async.transfer %__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>{%__constant_tensor_257x128x1x1xf32__size} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%__constant_tensor_257x128x1x1xf32__size}
%1 = stream.async.transfer %__constant_tensor_257xf32 : !stream.resource<constant>{%__constant_tensor_257xf32__size} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%__constant_tensor_257xf32__size}
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("tensor") shape([%c1, %c128, %c4, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x128x4x256xf32> : index
%3 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32> in !stream.resource<external>{%2}
%4 = stream.timepoint.import on(#hal.device.affinity<@__device_0>) %arg1 : (!hal.fence) => !stream.timepoint
%5 = stream.timepoint.await %4 => %3 : !stream.resource<external>{%2}
%6 = stream.async.transfer %5 : !stream.resource<external>{%2} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%2}
%7 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<257x128xf32> : index
%8 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<128x1024xf32> : index
%9 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<257x1024xf32> : index
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @torch_jit$async_dispatch_0::@torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%6[%c0 to %8 for %8], %0[%c0 to %7 for %7], %1[%c0 to %__constant_tensor_257xf32__size for %__constant_tensor_257xf32__size]) : (!stream.resource<*>{%8}, !stream.resource<*>{%7}, !stream.resource<*>{%__constant_tensor_257xf32__size}) -> !stream.resource<*>{%9}
%11 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x257x4x256xf32> : index
%result, %result_timepoint = stream.timepoint.barrier on(#hal.device.affinity<@__device_0>) %10 : !stream.resource<*>{%11} => !stream.timepoint
stream.timepoint.chain_external on(#hal.device.affinity<@__device_0>) %result_timepoint => (%arg2 : !hal.fence)
%12 = stream.async.transfer %result : !stream.resource<*>{%11} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%11}
%13 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %12 : tensor<1x257x4x256xf32> in !stream.resource<external>{%11} -> !hal.buffer_view
util.return %13 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatternsPass (iree-util-apply-patterns) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatternsPass (iree-util-apply-patterns) //----- //
util.initializer {
%cst = stream.tensor.constant on(#hal.device.affinity<@__device_0>) : tensor<257xf32> in !stream.resource<constant> = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = stream.resource.size %cst : !stream.resource<constant>
util.global.store %cst, @__constant_tensor_257xf32 : !stream.resource<constant>
util.global.store %0, @__constant_tensor_257xf32__size : index
util.return
}
// -----// IR Dump After ApplyPatternsPass (iree-util-apply-patterns) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%c1 = arith.constant 1 : index
%c128 = arith.constant 128 : index
%c4 = arith.constant 4 : index
%c256 = arith.constant 256 : index
%c0 = arith.constant 0 : index
%__constant_tensor_257x128x1x1xf32 = util.global.load @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
%__constant_tensor_257x128x1x1xf32__size = util.global.load @__constant_tensor_257x128x1x1xf32__size : index
%__constant_tensor_257xf32 = util.global.load @__constant_tensor_257xf32 : !stream.resource<constant>
%__constant_tensor_257xf32__size = util.global.load @__constant_tensor_257xf32__size : index
%0 = stream.async.transfer %__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>{%__constant_tensor_257x128x1x1xf32__size} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%__constant_tensor_257x128x1x1xf32__size}
%1 = stream.async.transfer %__constant_tensor_257xf32 : !stream.resource<constant>{%__constant_tensor_257xf32__size} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%__constant_tensor_257xf32__size}
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("tensor") shape([%c1, %c128, %c4, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x128x4x256xf32> : index
%3 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32> in !stream.resource<external>{%2}
%4 = stream.timepoint.import on(#hal.device.affinity<@__device_0>) %arg1 : (!hal.fence) => !stream.timepoint
%5 = stream.timepoint.await %4 => %3 : !stream.resource<external>{%2}
%6 = stream.async.transfer %5 : !stream.resource<external>{%2} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%2}
%7 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<257x128xf32> : index
%8 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<128x1024xf32> : index
%9 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<257x1024xf32> : index
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @torch_jit$async_dispatch_0::@torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%6[%c0 to %8 for %8], %0[%c0 to %7 for %7], %1[%c0 to %__constant_tensor_257xf32__size for %__constant_tensor_257xf32__size]) : (!stream.resource<*>{%8}, !stream.resource<*>{%7}, !stream.resource<*>{%__constant_tensor_257xf32__size}) -> !stream.resource<*>{%9}
%11 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x257x4x256xf32> : index
%result, %result_timepoint = stream.timepoint.barrier on(#hal.device.affinity<@__device_0>) %10 : !stream.resource<*>{%11} => !stream.timepoint
stream.timepoint.chain_external on(#hal.device.affinity<@__device_0>) %result_timepoint => (%arg2 : !hal.fence)
%12 = stream.async.transfer %result : !stream.resource<*>{%11} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%11}
%13 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %12 : tensor<1x257x4x256xf32> in !stream.resource<external>{%11} -> !hal.buffer_view
util.return %13 : !hal.buffer_view
}
// -----// IR Dump After FoldGlobalsPass (iree-util-fold-globals) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d2, d1)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d2)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
#map3 = affine_map<(d0, d1) -> (d0, d1)>
#map4 = affine_map<(d0, d1) -> (d0)>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @torch_jit$async_dispatch_0 {
stream.executable.export public @torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x1024xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<257x128xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<257xf32>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 1024], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x1024xf32>> -> tensor<128x1024xf32>
%5 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [257, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<257x128xf32>> -> tensor<257x128xf32>
%6 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [257], strides = [1] : !flow.dispatch.tensor<readonly:tensor<257xf32>> -> tensor<257xf32>
%7 = tensor.empty() : tensor<257x1024xf32>
%8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<257x1024xf32>) -> tensor<257x1024xf32>
%9 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%4, %5 : tensor<128x1024xf32>, tensor<257x128xf32>) outs(%8 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.mulf %in, %in_0 : f32
%12 = arith.addf %out, %11 : f32
linalg.yield %12 : f32
} -> tensor<257x1024xf32>
%10 = linalg.generic {indexing_maps = [#map3, #map4, #map3], iterator_types = ["parallel", "parallel"]} ins(%9, %6 : tensor<257x1024xf32>, tensor<257xf32>) outs(%7 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
linalg.yield %11 : f32
} -> tensor<257x1024xf32>
flow.dispatch.tensor.store %10, %3, offsets = [0, 0], sizes = [257, 1024], strides = [1, 1] : tensor<257x1024xf32> -> !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>
return
}
}
}
util.global private @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
util.global private @__constant_tensor_257x128x1x1xf32__size : index
util.initializer {
%cst = stream.tensor.constant on(#hal.device.affinity<@__device_0>) : tensor<257x128x1x1xf32> in !stream.resource<constant> = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%0 = stream.resource.size %cst : !stream.resource<constant>
util.global.store %cst, @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
util.global.store %0, @__constant_tensor_257x128x1x1xf32__size : index
util.return
}
util.global private @__constant_tensor_257xf32 : !stream.resource<constant>
util.global private @__constant_tensor_257xf32__size : index
util.initializer {
%cst = stream.tensor.constant on(#hal.device.affinity<@__device_0>) : tensor<257xf32> in !stream.resource<constant> = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = stream.resource.size %cst : !stream.resource<constant>
util.global.store %cst, @__constant_tensor_257xf32 : !stream.resource<constant>
util.global.store %0, @__constant_tensor_257xf32__size : index
util.return
}
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%c1 = arith.constant 1 : index
%c128 = arith.constant 128 : index
%c4 = arith.constant 4 : index
%c256 = arith.constant 256 : index
%c0 = arith.constant 0 : index
%__constant_tensor_257x128x1x1xf32 = util.global.load immutable @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
%__constant_tensor_257x128x1x1xf32__size = util.global.load immutable @__constant_tensor_257x128x1x1xf32__size : index
%__constant_tensor_257xf32 = util.global.load immutable @__constant_tensor_257xf32 : !stream.resource<constant>
%__constant_tensor_257xf32__size = util.global.load immutable @__constant_tensor_257xf32__size : index
%0 = stream.async.transfer %__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>{%__constant_tensor_257x128x1x1xf32__size} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%__constant_tensor_257x128x1x1xf32__size}
%1 = stream.async.transfer %__constant_tensor_257xf32 : !stream.resource<constant>{%__constant_tensor_257xf32__size} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%__constant_tensor_257xf32__size}
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("tensor") shape([%c1, %c128, %c4, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x128x4x256xf32> : index
%3 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32> in !stream.resource<external>{%2}
%4 = stream.timepoint.import on(#hal.device.affinity<@__device_0>) %arg1 : (!hal.fence) => !stream.timepoint
%5 = stream.timepoint.await %4 => %3 : !stream.resource<external>{%2}
%6 = stream.async.transfer %5 : !stream.resource<external>{%2} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%2}
%7 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<257x128xf32> : index
%8 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<128x1024xf32> : index
%9 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<257x1024xf32> : index
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @torch_jit$async_dispatch_0::@torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%6[%c0 to %8 for %8], %0[%c0 to %7 for %7], %1[%c0 to %__constant_tensor_257xf32__size for %__constant_tensor_257xf32__size]) : (!stream.resource<*>{%8}, !stream.resource<*>{%7}, !stream.resource<*>{%__constant_tensor_257xf32__size}) -> !stream.resource<*>{%9}
%11 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x257x4x256xf32> : index
%result, %result_timepoint = stream.timepoint.barrier on(#hal.device.affinity<@__device_0>) %10 : !stream.resource<*>{%11} => !stream.timepoint
stream.timepoint.chain_external on(#hal.device.affinity<@__device_0>) %result_timepoint => (%arg2 : !hal.fence)
%12 = stream.async.transfer %result : !stream.resource<*>{%11} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%11}
%13 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %12 : tensor<1x257x4x256xf32> in !stream.resource<external>{%11} -> !hal.buffer_view
util.return %13 : !hal.buffer_view
}
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
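// Note on the FoldGlobalsPass output above (reader's observation): the constant globals and their __size companions
// are now read with util.global.load immutable inside @torch_jit$async, presumably because each global is stored
// exactly once, in its util.initializer.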
// -----// IR Dump After FuseGlobalsPass (iree-util-fuse-globals) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d2, d1)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d2)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
#map3 = affine_map<(d0, d1) -> (d0, d1)>
#map4 = affine_map<(d0, d1) -> (d0)>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @torch_jit$async_dispatch_0 {
stream.executable.export public @torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x1024xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<257x128xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<257xf32>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 1024], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x1024xf32>> -> tensor<128x1024xf32>
%5 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [257, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<257x128xf32>> -> tensor<257x128xf32>
%6 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [257], strides = [1] : !flow.dispatch.tensor<readonly:tensor<257xf32>> -> tensor<257xf32>
%7 = tensor.empty() : tensor<257x1024xf32>
%8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<257x1024xf32>) -> tensor<257x1024xf32>
%9 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%4, %5 : tensor<128x1024xf32>, tensor<257x128xf32>) outs(%8 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.mulf %in, %in_0 : f32
%12 = arith.addf %out, %11 : f32
linalg.yield %12 : f32
} -> tensor<257x1024xf32>
%10 = linalg.generic {indexing_maps = [#map3, #map4, #map3], iterator_types = ["parallel", "parallel"]} ins(%9, %6 : tensor<257x1024xf32>, tensor<257xf32>) outs(%7 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
linalg.yield %11 : f32
} -> tensor<257x1024xf32>
flow.dispatch.tensor.store %10, %3, offsets = [0, 0], sizes = [257, 1024], strides = [1, 1] : tensor<257x1024xf32> -> !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>
return
}
}
}
util.global private @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
util.global private @__constant_tensor_257x128x1x1xf32__size : index
util.initializer {
%cst = stream.tensor.constant on(#hal.device.affinity<@__device_0>) : tensor<257x128x1x1xf32> in !stream.resource<constant> = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%0 = stream.resource.size %cst : !stream.resource<constant>
util.global.store %cst, @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
util.global.store %0, @__constant_tensor_257x128x1x1xf32__size : index
util.return
}
util.global private @__constant_tensor_257xf32 : !stream.resource<constant>
util.global private @__constant_tensor_257xf32__size : index
util.initializer {
%cst = stream.tensor.constant on(#hal.device.affinity<@__device_0>) : tensor<257xf32> in !stream.resource<constant> = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = stream.resource.size %cst : !stream.resource<constant>
util.global.store %cst, @__constant_tensor_257xf32 : !stream.resource<constant>
util.global.store %0, @__constant_tensor_257xf32__size : index
util.return
}
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%c1 = arith.constant 1 : index
%c128 = arith.constant 128 : index
%c4 = arith.constant 4 : index
%c256 = arith.constant 256 : index
%c0 = arith.constant 0 : index
%__constant_tensor_257x128x1x1xf32 = util.global.load immutable @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
%__constant_tensor_257x128x1x1xf32__size = util.global.load immutable @__constant_tensor_257x128x1x1xf32__size : index
%__constant_tensor_257xf32 = util.global.load immutable @__constant_tensor_257xf32 : !stream.resource<constant>
%__constant_tensor_257xf32__size = util.global.load immutable @__constant_tensor_257xf32__size : index
%0 = stream.async.transfer %__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>{%__constant_tensor_257x128x1x1xf32__size} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%__constant_tensor_257x128x1x1xf32__size}
%1 = stream.async.transfer %__constant_tensor_257xf32 : !stream.resource<constant>{%__constant_tensor_257xf32__size} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%__constant_tensor_257xf32__size}
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("tensor") shape([%c1, %c128, %c4, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x128x4x256xf32> : index
%3 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32> in !stream.resource<external>{%2}
%4 = stream.timepoint.import on(#hal.device.affinity<@__device_0>) %arg1 : (!hal.fence) => !stream.timepoint
%5 = stream.timepoint.await %4 => %3 : !stream.resource<external>{%2}
%6 = stream.async.transfer %5 : !stream.resource<external>{%2} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%2}
%7 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<257x128xf32> : index
%8 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<128x1024xf32> : index
%9 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<257x1024xf32> : index
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @torch_jit$async_dispatch_0::@torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%6[%c0 to %8 for %8], %0[%c0 to %7 for %7], %1[%c0 to %__constant_tensor_257xf32__size for %__constant_tensor_257xf32__size]) : (!stream.resource<*>{%8}, !stream.resource<*>{%7}, !stream.resource<*>{%__constant_tensor_257xf32__size}) -> !stream.resource<*>{%9}
%11 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x257x4x256xf32> : index
%result, %result_timepoint = stream.timepoint.barrier on(#hal.device.affinity<@__device_0>) %10 : !stream.resource<*>{%11} => !stream.timepoint
stream.timepoint.chain_external on(#hal.device.affinity<@__device_0>) %result_timepoint => (%arg2 : !hal.fence)
%12 = stream.async.transfer %result : !stream.resource<*>{%11} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%11}
%13 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %12 : tensor<1x257x4x256xf32> in !stream.resource<external>{%11} -> !hal.buffer_view
util.return %13 : !hal.buffer_view
}
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
// -----// IR Dump After IPOPass (iree-util-ipo) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d2, d1)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d2)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
#map3 = affine_map<(d0, d1) -> (d0, d1)>
#map4 = affine_map<(d0, d1) -> (d0)>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @torch_jit$async_dispatch_0 {
stream.executable.export public @torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x1024xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<257x128xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<257xf32>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 1024], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x1024xf32>> -> tensor<128x1024xf32>
%5 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [257, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<257x128xf32>> -> tensor<257x128xf32>
%6 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [257], strides = [1] : !flow.dispatch.tensor<readonly:tensor<257xf32>> -> tensor<257xf32>
%7 = tensor.empty() : tensor<257x1024xf32>
%8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<257x1024xf32>) -> tensor<257x1024xf32>
%9 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%4, %5 : tensor<128x1024xf32>, tensor<257x128xf32>) outs(%8 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.mulf %in, %in_0 : f32
%12 = arith.addf %out, %11 : f32
linalg.yield %12 : f32
} -> tensor<257x1024xf32>
%10 = linalg.generic {indexing_maps = [#map3, #map4, #map3], iterator_types = ["parallel", "parallel"]} ins(%9, %6 : tensor<257x1024xf32>, tensor<257xf32>) outs(%7 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
linalg.yield %11 : f32
} -> tensor<257x1024xf32>
flow.dispatch.tensor.store %10, %3, offsets = [0, 0], sizes = [257, 1024], strides = [1, 1] : tensor<257x1024xf32> -> !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>
return
}
}
}
util.global private @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
util.global private @__constant_tensor_257x128x1x1xf32__size : index
util.initializer {
%cst = stream.tensor.constant on(#hal.device.affinity<@__device_0>) : tensor<257x128x1x1xf32> in !stream.resource<constant> = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%0 = stream.resource.size %cst : !stream.resource<constant>
util.global.store %cst, @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
util.global.store %0, @__constant_tensor_257x128x1x1xf32__size : index
util.return
}
util.global private @__constant_tensor_257xf32 : !stream.resource<constant>
util.global private @__constant_tensor_257xf32__size : index
util.initializer {
%cst = stream.tensor.constant on(#hal.device.affinity<@__device_0>) : tensor<257xf32> in !stream.resource<constant> = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%0 = stream.resource.size %cst : !stream.resource<constant>
util.global.store %cst, @__constant_tensor_257xf32 : !stream.resource<constant>
util.global.store %0, @__constant_tensor_257xf32__size : index
util.return
}
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%c1 = arith.constant 1 : index
%c128 = arith.constant 128 : index
%c4 = arith.constant 4 : index
%c256 = arith.constant 256 : index
%c0 = arith.constant 0 : index
%__constant_tensor_257x128x1x1xf32 = util.global.load immutable @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
%__constant_tensor_257x128x1x1xf32__size = util.global.load immutable @__constant_tensor_257x128x1x1xf32__size : index
%__constant_tensor_257xf32 = util.global.load immutable @__constant_tensor_257xf32 : !stream.resource<constant>
%__constant_tensor_257xf32__size = util.global.load immutable @__constant_tensor_257xf32__size : index
%0 = stream.async.transfer %__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>{%__constant_tensor_257x128x1x1xf32__size} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%__constant_tensor_257x128x1x1xf32__size}
%1 = stream.async.transfer %__constant_tensor_257xf32 : !stream.resource<constant>{%__constant_tensor_257xf32__size} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%__constant_tensor_257xf32__size}
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("tensor") shape([%c1, %c128, %c4, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x128x4x256xf32> : index
%3 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32> in !stream.resource<external>{%2}
%4 = stream.timepoint.import on(#hal.device.affinity<@__device_0>) %arg1 : (!hal.fence) => !stream.timepoint
%5 = stream.timepoint.await %4 => %3 : !stream.resource<external>{%2}
%6 = stream.async.transfer %5 : !stream.resource<external>{%2} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%2}
%7 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<257x128xf32> : index
%8 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<128x1024xf32> : index
%9 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<257x1024xf32> : index
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @torch_jit$async_dispatch_0::@torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%6[%c0 to %8 for %8], %0[%c0 to %7 for %7], %1[%c0 to %__constant_tensor_257xf32__size for %__constant_tensor_257xf32__size]) : (!stream.resource<*>{%8}, !stream.resource<*>{%7}, !stream.resource<*>{%__constant_tensor_257xf32__size}) -> !stream.resource<*>{%9}
%11 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x257x4x256xf32> : index
%result, %result_timepoint = stream.timepoint.barrier on(#hal.device.affinity<@__device_0>) %10 : !stream.resource<*>{%11} => !stream.timepoint
stream.timepoint.chain_external on(#hal.device.affinity<@__device_0>) %result_timepoint => (%arg2 : !hal.fence)
%12 = stream.async.transfer %result : !stream.resource<*>{%11} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%11}
%13 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %12 : tensor<1x257x4x256xf32> in !stream.resource<external>{%11} -> !hal.buffer_view
util.return %13 : !hal.buffer_view
}
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
// -----// IR Dump After CombineInitializersPass (iree-util-combine-initializers) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d2, d1)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d2)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
#map3 = affine_map<(d0, d1) -> (d0, d1)>
#map4 = affine_map<(d0, d1) -> (d0)>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @torch_jit$async_dispatch_0 {
stream.executable.export public @torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x1024xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<257x128xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<257xf32>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 1024], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x1024xf32>> -> tensor<128x1024xf32>
%5 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [257, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<257x128xf32>> -> tensor<257x128xf32>
%6 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [257], strides = [1] : !flow.dispatch.tensor<readonly:tensor<257xf32>> -> tensor<257xf32>
%7 = tensor.empty() : tensor<257x1024xf32>
%8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<257x1024xf32>) -> tensor<257x1024xf32>
%9 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%4, %5 : tensor<128x1024xf32>, tensor<257x128xf32>) outs(%8 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.mulf %in, %in_0 : f32
%12 = arith.addf %out, %11 : f32
linalg.yield %12 : f32
} -> tensor<257x1024xf32>
%10 = linalg.generic {indexing_maps = [#map3, #map4, #map3], iterator_types = ["parallel", "parallel"]} ins(%9, %6 : tensor<257x1024xf32>, tensor<257xf32>) outs(%7 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
linalg.yield %11 : f32
} -> tensor<257x1024xf32>
flow.dispatch.tensor.store %10, %3, offsets = [0, 0], sizes = [257, 1024], strides = [1, 1] : tensor<257x1024xf32> -> !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>
return
}
}
}
util.global private @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
util.global private @__constant_tensor_257x128x1x1xf32__size : index
util.initializer {
%cst = stream.tensor.constant on(#hal.device.affinity<@__device_0>) : tensor<257x128x1x1xf32> in !stream.resource<constant> = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%0 = stream.resource.size %cst : !stream.resource<constant>
util.global.store %cst, @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
util.global.store %0, @__constant_tensor_257x128x1x1xf32__size : index
%cst_0 = stream.tensor.constant on(#hal.device.affinity<@__device_0>) : tensor<257xf32> in !stream.resource<constant> = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
%1 = stream.resource.size %cst_0 : !stream.resource<constant>
util.global.store %cst_0, @__constant_tensor_257xf32 : !stream.resource<constant>
util.global.store %1, @__constant_tensor_257xf32__size : index
util.return
}
util.global private @__constant_tensor_257xf32 : !stream.resource<constant>
util.global private @__constant_tensor_257xf32__size : index
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%c1 = arith.constant 1 : index
%c128 = arith.constant 128 : index
%c4 = arith.constant 4 : index
%c256 = arith.constant 256 : index
%c0 = arith.constant 0 : index
%__constant_tensor_257x128x1x1xf32 = util.global.load immutable @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
%__constant_tensor_257x128x1x1xf32__size = util.global.load immutable @__constant_tensor_257x128x1x1xf32__size : index
%__constant_tensor_257xf32 = util.global.load immutable @__constant_tensor_257xf32 : !stream.resource<constant>
%__constant_tensor_257xf32__size = util.global.load immutable @__constant_tensor_257xf32__size : index
%0 = stream.async.transfer %__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>{%__constant_tensor_257x128x1x1xf32__size} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%__constant_tensor_257x128x1x1xf32__size}
%1 = stream.async.transfer %__constant_tensor_257xf32 : !stream.resource<constant>{%__constant_tensor_257xf32__size} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%__constant_tensor_257xf32__size}
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("tensor") shape([%c1, %c128, %c4, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x128x4x256xf32> : index
%3 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32> in !stream.resource<external>{%2}
%4 = stream.timepoint.import on(#hal.device.affinity<@__device_0>) %arg1 : (!hal.fence) => !stream.timepoint
%5 = stream.timepoint.await %4 => %3 : !stream.resource<external>{%2}
%6 = stream.async.transfer %5 : !stream.resource<external>{%2} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%2}
%7 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<257x128xf32> : index
%8 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<128x1024xf32> : index
%9 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<257x1024xf32> : index
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @torch_jit$async_dispatch_0::@torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%6[%c0 to %8 for %8], %0[%c0 to %7 for %7], %1[%c0 to %__constant_tensor_257xf32__size for %__constant_tensor_257xf32__size]) : (!stream.resource<*>{%8}, !stream.resource<*>{%7}, !stream.resource<*>{%__constant_tensor_257xf32__size}) -> !stream.resource<*>{%9}
%11 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x257x4x256xf32> : index
%result, %result_timepoint = stream.timepoint.barrier on(#hal.device.affinity<@__device_0>) %10 : !stream.resource<*>{%11} => !stream.timepoint
stream.timepoint.chain_external on(#hal.device.affinity<@__device_0>) %result_timepoint => (%arg2 : !hal.fence)
%12 = stream.async.transfer %result : !stream.resource<*>{%11} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%11}
%13 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %12 : tensor<1x257x4x256xf32> in !stream.resource<external>{%11} -> !hal.buffer_view
util.return %13 : !hal.buffer_view
}
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
// -----// IR Dump After EncodeHostTensorsPass (iree-stream-encode-host-tensors) //----- //
util.initializer {
%c1028 = arith.constant 1028 : index
%c131584 = arith.constant 131584 : index
%cst = stream.async.constant on(#hal.device.affinity<@__device_0>) : !stream.resource<constant>{%c131584} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
util.global.store %cst, @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
util.global.store %c131584, @__constant_tensor_257x128x1x1xf32__size : index
%cst_0 = stream.async.constant on(#hal.device.affinity<@__device_0>) : !stream.resource<constant>{%c1028} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
util.global.store %cst_0, @__constant_tensor_257xf32 : !stream.resource<constant>
util.global.store %c1028, @__constant_tensor_257xf32__size : index
util.return
}
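// Note: assuming densely packed row-major f32 storage, the constant byte sizes above follow
// directly from the tensor shapes: 257*128*1*1*4 = 131584 for tensor<257x128x1x1xf32> and
// 257*4 = 1028 for tensor<257xf32>. This is how iree-stream-encode-host-tensors replaces the
// earlier stream.resource.size queries with plain arith.constant index values.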
// -----// IR Dump After EncodeHostTensorsPass (iree-stream-encode-host-tensors) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After EncodeDeviceTensorsPass (iree-stream-encode-device-tensors) //----- //
stream.executable private @torch_jit$async_dispatch_0 {
stream.executable.export public @torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x1024xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<257x128xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<257xf32>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 1024], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x1024xf32>> -> tensor<128x1024xf32>
%5 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [257, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<257x128xf32>> -> tensor<257x128xf32>
%6 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [257], strides = [1] : !flow.dispatch.tensor<readonly:tensor<257xf32>> -> tensor<257xf32>
%7 = tensor.empty() : tensor<257x1024xf32>
%8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<257x1024xf32>) -> tensor<257x1024xf32>
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%4, %5 : tensor<128x1024xf32>, tensor<257x128xf32>) outs(%8 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.mulf %in, %in_0 : f32
%12 = arith.addf %out, %11 : f32
linalg.yield %12 : f32
} -> tensor<257x1024xf32>
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%9, %6 : tensor<257x1024xf32>, tensor<257xf32>) outs(%7 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
linalg.yield %11 : f32
} -> tensor<257x1024xf32>
flow.dispatch.tensor.store %10, %3, offsets = [0, 0], sizes = [257, 1024], strides = [1, 1] : tensor<257x1024xf32> -> !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>
return
}
}
}
// -----// IR Dump After EncodeHostTensorsPass (iree-stream-encode-host-tensors) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%c1052672 = arith.constant 1052672 : index
%c131584 = arith.constant 131584 : index
%c524288 = arith.constant 524288 : index
%c1 = arith.constant 1 : index
%c128 = arith.constant 128 : index
%c4 = arith.constant 4 : index
%c256 = arith.constant 256 : index
%c0 = arith.constant 0 : index
%__constant_tensor_257x128x1x1xf32 = util.global.load immutable @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
%__constant_tensor_257x128x1x1xf32__size = util.global.load immutable @__constant_tensor_257x128x1x1xf32__size : index
%__constant_tensor_257xf32 = util.global.load immutable @__constant_tensor_257xf32 : !stream.resource<constant>
%__constant_tensor_257xf32__size = util.global.load immutable @__constant_tensor_257xf32__size : index
%0 = stream.async.transfer %__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>{%__constant_tensor_257x128x1x1xf32__size} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%__constant_tensor_257x128x1x1xf32__size}
%1 = stream.async.transfer %__constant_tensor_257xf32 : !stream.resource<constant>{%__constant_tensor_257xf32__size} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%__constant_tensor_257xf32__size}
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("tensor") shape([%c1, %c128, %c4, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32> in !stream.resource<external>{%c524288}
%3 = stream.timepoint.import on(#hal.device.affinity<@__device_0>) %arg1 : (!hal.fence) => !stream.timepoint
%4 = stream.timepoint.await %3 => %2 : !stream.resource<external>{%c524288}
%5 = stream.async.transfer %4 : !stream.resource<external>{%c524288} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c524288}
%6 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @torch_jit$async_dispatch_0::@torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%5[%c0 to %c524288 for %c524288], %0[%c0 to %c131584 for %c131584], %1[%c0 to %__constant_tensor_257xf32__size for %__constant_tensor_257xf32__size]) : (!stream.resource<*>{%c524288}, !stream.resource<*>{%c131584}, !stream.resource<*>{%__constant_tensor_257xf32__size}) -> !stream.resource<*>{%c1052672}
%result, %result_timepoint = stream.timepoint.barrier on(#hal.device.affinity<@__device_0>) %6 : !stream.resource<*>{%c1052672} => !stream.timepoint
stream.timepoint.chain_external on(#hal.device.affinity<@__device_0>) %result_timepoint => (%arg2 : !hal.fence)
%7 = stream.async.transfer %result : !stream.resource<*>{%c1052672} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c1052672}
%8 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<1x257x4x256xf32> in !stream.resource<external>{%c1052672} -> !hal.buffer_view
util.return %8 : !hal.buffer_view
}
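// Note: the same folding applies in @torch_jit$async, again assuming dense row-major f32
// layouts: tensor<1x128x4x256xf32> -> 1*128*4*256*4 = 524288 bytes, tensor<257x128xf32> ->
// 257*128*4 = 131584 bytes, and tensor<1x257x4x256xf32> (equivalently tensor<257x1024xf32>) ->
// 257*1024*4 = 1052672 bytes; the stream.tensor.sizeof ops from the previous dump are thus
// replaced by the arith.constant indices seen in this dump.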
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.initializer {
%c1028 = arith.constant 1028 : index
%c131584 = arith.constant 131584 : index
%cst = stream.async.constant on(#hal.device.affinity<@__device_0>) : !stream.resource<constant>{%c131584} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
util.global.store %cst, @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
util.global.store %c131584, @__constant_tensor_257x128x1x1xf32__size : index
%cst_0 = stream.async.constant on(#hal.device.affinity<@__device_0>) : !stream.resource<constant>{%c1028} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
util.global.store %cst_0, @__constant_tensor_257xf32 : !stream.resource<constant>
util.global.store %c1028, @__constant_tensor_257xf32__size : index
util.return
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%c1052672 = arith.constant 1052672 : index
%c131584 = arith.constant 131584 : index
%c524288 = arith.constant 524288 : index
%c1 = arith.constant 1 : index
%c128 = arith.constant 128 : index
%c4 = arith.constant 4 : index
%c256 = arith.constant 256 : index
%c0 = arith.constant 0 : index
%__constant_tensor_257x128x1x1xf32 = util.global.load immutable @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
%__constant_tensor_257x128x1x1xf32__size = util.global.load immutable @__constant_tensor_257x128x1x1xf32__size : index
%__constant_tensor_257xf32 = util.global.load immutable @__constant_tensor_257xf32 : !stream.resource<constant>
%__constant_tensor_257xf32__size = util.global.load immutable @__constant_tensor_257xf32__size : index
%0 = stream.async.transfer %__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>{%__constant_tensor_257x128x1x1xf32__size} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%__constant_tensor_257x128x1x1xf32__size}
%1 = stream.async.transfer %__constant_tensor_257xf32 : !stream.resource<constant>{%__constant_tensor_257xf32__size} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%__constant_tensor_257xf32__size}
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("tensor") shape([%c1, %c128, %c4, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32> in !stream.resource<external>{%c524288}
%3 = stream.timepoint.import on(#hal.device.affinity<@__device_0>) %arg1 : (!hal.fence) => !stream.timepoint
%4 = stream.timepoint.await %3 => %2 : !stream.resource<external>{%c524288}
%5 = stream.async.transfer %4 : !stream.resource<external>{%c524288} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c524288}
%6 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @torch_jit$async_dispatch_0::@torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%5[%c0 to %c524288 for %c524288], %0[%c0 to %c131584 for %c131584], %1[%c0 to %__constant_tensor_257xf32__size for %__constant_tensor_257xf32__size]) : (!stream.resource<*>{%c524288}, !stream.resource<*>{%c131584}, !stream.resource<*>{%__constant_tensor_257xf32__size}) -> !stream.resource<*>{%c1052672}
%result, %result_timepoint = stream.timepoint.barrier on(#hal.device.affinity<@__device_0>) %6 : !stream.resource<*>{%c1052672} => !stream.timepoint
stream.timepoint.chain_external on(#hal.device.affinity<@__device_0>) %result_timepoint => (%arg2 : !hal.fence)
%7 = stream.async.transfer %result : !stream.resource<*>{%c1052672} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c1052672}
%8 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<1x257x4x256xf32> in !stream.resource<external>{%c1052672} -> !hal.buffer_view
util.return %8 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.initializer {
%c1028 = arith.constant 1028 : index
%c131584 = arith.constant 131584 : index
%cst = stream.async.constant on(#hal.device.affinity<@__device_0>) : !stream.resource<constant>{%c131584} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
util.global.store %cst, @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
util.global.store %c131584, @__constant_tensor_257x128x1x1xf32__size : index
%cst_0 = stream.async.constant on(#hal.device.affinity<@__device_0>) : !stream.resource<constant>{%c1028} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
util.global.store %cst_0, @__constant_tensor_257xf32 : !stream.resource<constant>
util.global.store %c1028, @__constant_tensor_257xf32__size : index
util.return
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%c1052672 = arith.constant 1052672 : index
%c131584 = arith.constant 131584 : index
%c524288 = arith.constant 524288 : index
%c1 = arith.constant 1 : index
%c128 = arith.constant 128 : index
%c4 = arith.constant 4 : index
%c256 = arith.constant 256 : index
%c0 = arith.constant 0 : index
%__constant_tensor_257x128x1x1xf32 = util.global.load immutable @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
%__constant_tensor_257x128x1x1xf32__size = util.global.load immutable @__constant_tensor_257x128x1x1xf32__size : index
%__constant_tensor_257xf32 = util.global.load immutable @__constant_tensor_257xf32 : !stream.resource<constant>
%__constant_tensor_257xf32__size = util.global.load immutable @__constant_tensor_257xf32__size : index
%0 = stream.async.transfer %__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>{%__constant_tensor_257x128x1x1xf32__size} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%__constant_tensor_257x128x1x1xf32__size}
%1 = stream.async.transfer %__constant_tensor_257xf32 : !stream.resource<constant>{%__constant_tensor_257xf32__size} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%__constant_tensor_257xf32__size}
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("tensor") shape([%c1, %c128, %c4, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32> in !stream.resource<external>{%c524288}
%3 = stream.timepoint.import on(#hal.device.affinity<@__device_0>) %arg1 : (!hal.fence) => !stream.timepoint
%4 = stream.timepoint.await %3 => %2 : !stream.resource<external>{%c524288}
%5 = stream.async.transfer %4 : !stream.resource<external>{%c524288} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c524288}
%6 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @torch_jit$async_dispatch_0::@torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%5[%c0 to %c524288 for %c524288], %0[%c0 to %c131584 for %c131584], %1[%c0 to %__constant_tensor_257xf32__size for %__constant_tensor_257xf32__size]) : (!stream.resource<*>{%c524288}, !stream.resource<*>{%c131584}, !stream.resource<*>{%__constant_tensor_257xf32__size}) -> !stream.resource<*>{%c1052672}
%result, %result_timepoint = stream.timepoint.barrier on(#hal.device.affinity<@__device_0>) %6 : !stream.resource<*>{%c1052672} => !stream.timepoint
stream.timepoint.chain_external on(#hal.device.affinity<@__device_0>) %result_timepoint => (%arg2 : !hal.fence)
%7 = stream.async.transfer %result : !stream.resource<*>{%c1052672} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c1052672}
%8 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<1x257x4x256xf32> in !stream.resource<external>{%c1052672} -> !hal.buffer_view
util.return %8 : !hal.buffer_view
}
// -----// IR Dump After OptimizeIntArithmeticPass (iree-util-optimize-int-arithmetic) //----- //
util.initializer {
%c1028 = arith.constant 1028 : index
%c131584 = arith.constant 131584 : index
%cst = stream.async.constant on(#hal.device.affinity<@__device_0>) : !stream.resource<constant>{%c131584} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
util.global.store %cst, @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
util.global.store %c131584, @__constant_tensor_257x128x1x1xf32__size : index
%cst_0 = stream.async.constant on(#hal.device.affinity<@__device_0>) : !stream.resource<constant>{%c1028} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
util.global.store %cst_0, @__constant_tensor_257xf32 : !stream.resource<constant>
util.global.store %c1028, @__constant_tensor_257xf32__size : index
util.return
}
// -----// IR Dump After OptimizeIntArithmeticPass (iree-util-optimize-int-arithmetic) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After OptimizeIntArithmeticPass (iree-util-optimize-int-arithmetic) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%c1052672 = arith.constant 1052672 : index
%c131584 = arith.constant 131584 : index
%c524288 = arith.constant 524288 : index
%c1 = arith.constant 1 : index
%c128 = arith.constant 128 : index
%c4 = arith.constant 4 : index
%c256 = arith.constant 256 : index
%c0 = arith.constant 0 : index
%__constant_tensor_257x128x1x1xf32 = util.global.load immutable @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
%__constant_tensor_257x128x1x1xf32__size = util.global.load immutable @__constant_tensor_257x128x1x1xf32__size : index
%__constant_tensor_257xf32 = util.global.load immutable @__constant_tensor_257xf32 : !stream.resource<constant>
%__constant_tensor_257xf32__size = util.global.load immutable @__constant_tensor_257xf32__size : index
%0 = stream.async.transfer %__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>{%__constant_tensor_257x128x1x1xf32__size} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%__constant_tensor_257x128x1x1xf32__size}
%1 = stream.async.transfer %__constant_tensor_257xf32 : !stream.resource<constant>{%__constant_tensor_257xf32__size} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%__constant_tensor_257xf32__size}
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("tensor") shape([%c1, %c128, %c4, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32> in !stream.resource<external>{%c524288}
%3 = stream.timepoint.import on(#hal.device.affinity<@__device_0>) %arg1 : (!hal.fence) => !stream.timepoint
%4 = stream.timepoint.await %3 => %2 : !stream.resource<external>{%c524288}
%5 = stream.async.transfer %4 : !stream.resource<external>{%c524288} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c524288}
%6 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @torch_jit$async_dispatch_0::@torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%5[%c0 to %c524288 for %c524288], %0[%c0 to %c131584 for %c131584], %1[%c0 to %__constant_tensor_257xf32__size for %__constant_tensor_257xf32__size]) : (!stream.resource<*>{%c524288}, !stream.resource<*>{%c131584}, !stream.resource<*>{%__constant_tensor_257xf32__size}) -> !stream.resource<*>{%c1052672}
%result, %result_timepoint = stream.timepoint.barrier on(#hal.device.affinity<@__device_0>) %6 : !stream.resource<*>{%c1052672} => !stream.timepoint
stream.timepoint.chain_external on(#hal.device.affinity<@__device_0>) %result_timepoint => (%arg2 : !hal.fence)
%7 = stream.async.transfer %result : !stream.resource<*>{%c1052672} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c1052672}
%8 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<1x257x4x256xf32> in !stream.resource<external>{%c1052672} -> !hal.buffer_view
util.return %8 : !hal.buffer_view
}
// -----// IR Dump After SimplifyGlobalAccessesPass (iree-util-simplify-global-accesses) //----- //
util.initializer {
%c1028 = arith.constant 1028 : index
%c131584 = arith.constant 131584 : index
%cst = stream.async.constant on(#hal.device.affinity<@__device_0>) : !stream.resource<constant>{%c131584} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_0 = stream.async.constant on(#hal.device.affinity<@__device_0>) : !stream.resource<constant>{%c1028} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
util.global.store %cst, @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
util.global.store %c131584, @__constant_tensor_257x128x1x1xf32__size : index
util.global.store %cst_0, @__constant_tensor_257xf32 : !stream.resource<constant>
util.global.store %c1028, @__constant_tensor_257xf32__size : index
util.return
}
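// Note: compared with the previous initializer dump, iree-util-simplify-global-accesses has
// only reordered the block so that both stream.async.constant ops are materialized before the
// four util.global.store ops; the stored values themselves are unchanged.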
// -----// IR Dump After SimplifyGlobalAccessesPass (iree-util-simplify-global-accesses) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After SimplifyGlobalAccessesPass (iree-util-simplify-global-accesses) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%__constant_tensor_257x128x1x1xf32 = util.global.load immutable @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
%__constant_tensor_257x128x1x1xf32__size = util.global.load immutable @__constant_tensor_257x128x1x1xf32__size : index
%__constant_tensor_257xf32 = util.global.load immutable @__constant_tensor_257xf32 : !stream.resource<constant>
%__constant_tensor_257xf32__size = util.global.load immutable @__constant_tensor_257xf32__size : index
%c1052672 = arith.constant 1052672 : index
%c131584 = arith.constant 131584 : index
%c524288 = arith.constant 524288 : index
%c1 = arith.constant 1 : index
%c128 = arith.constant 128 : index
%c4 = arith.constant 4 : index
%c256 = arith.constant 256 : index
%c0 = arith.constant 0 : index
%0 = stream.async.transfer %__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>{%__constant_tensor_257x128x1x1xf32__size} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%__constant_tensor_257x128x1x1xf32__size}
%1 = stream.async.transfer %__constant_tensor_257xf32 : !stream.resource<constant>{%__constant_tensor_257xf32__size} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%__constant_tensor_257xf32__size}
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("tensor") shape([%c1, %c128, %c4, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32> in !stream.resource<external>{%c524288}
%3 = stream.timepoint.import on(#hal.device.affinity<@__device_0>) %arg1 : (!hal.fence) => !stream.timepoint
%4 = stream.timepoint.await %3 => %2 : !stream.resource<external>{%c524288}
%5 = stream.async.transfer %4 : !stream.resource<external>{%c524288} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c524288}
%6 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @torch_jit$async_dispatch_0::@torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%5[%c0 to %c524288 for %c524288], %0[%c0 to %c131584 for %c131584], %1[%c0 to %__constant_tensor_257xf32__size for %__constant_tensor_257xf32__size]) : (!stream.resource<*>{%c524288}, !stream.resource<*>{%c131584}, !stream.resource<*>{%__constant_tensor_257xf32__size}) -> !stream.resource<*>{%c1052672}
%result, %result_timepoint = stream.timepoint.barrier on(#hal.device.affinity<@__device_0>) %6 : !stream.resource<*>{%c1052672} => !stream.timepoint
stream.timepoint.chain_external on(#hal.device.affinity<@__device_0>) %result_timepoint => (%arg2 : !hal.fence)
%7 = stream.async.transfer %result : !stream.resource<*>{%c1052672} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c1052672}
%8 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<1x257x4x256xf32> in !stream.resource<external>{%c1052672} -> !hal.buffer_view
util.return %8 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatternsPass (iree-util-apply-patterns) //----- //
util.initializer {
%c1028 = arith.constant 1028 : index
%c131584 = arith.constant 131584 : index
%cst = stream.async.constant on(#hal.device.affinity<@__device_0>) : !stream.resource<constant>{%c131584} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_0 = stream.async.constant on(#hal.device.affinity<@__device_0>) : !stream.resource<constant>{%c1028} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
util.global.store %cst, @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
util.global.store %c131584, @__constant_tensor_257x128x1x1xf32__size : index
util.global.store %cst_0, @__constant_tensor_257xf32 : !stream.resource<constant>
util.global.store %c1028, @__constant_tensor_257xf32__size : index
util.return
}
// -----// IR Dump After ApplyPatternsPass (iree-util-apply-patterns) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatternsPass (iree-util-apply-patterns) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c4 = arith.constant 4 : index
%c128 = arith.constant 128 : index
%c1 = arith.constant 1 : index
%c524288 = arith.constant 524288 : index
%c131584 = arith.constant 131584 : index
%c1052672 = arith.constant 1052672 : index
%__constant_tensor_257x128x1x1xf32 = util.global.load immutable @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
%__constant_tensor_257x128x1x1xf32__size = util.global.load immutable @__constant_tensor_257x128x1x1xf32__size : index
%__constant_tensor_257xf32 = util.global.load immutable @__constant_tensor_257xf32 : !stream.resource<constant>
%__constant_tensor_257xf32__size = util.global.load immutable @__constant_tensor_257xf32__size : index
%0 = stream.async.transfer %__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>{%__constant_tensor_257x128x1x1xf32__size} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%__constant_tensor_257x128x1x1xf32__size}
%1 = stream.async.transfer %__constant_tensor_257xf32 : !stream.resource<constant>{%__constant_tensor_257xf32__size} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%__constant_tensor_257xf32__size}
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("tensor") shape([%c1, %c128, %c4, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32> in !stream.resource<external>{%c524288}
%3 = stream.timepoint.import on(#hal.device.affinity<@__device_0>) %arg1 : (!hal.fence) => !stream.timepoint
%4 = stream.timepoint.await %3 => %2 : !stream.resource<external>{%c524288}
%5 = stream.async.transfer %4 : !stream.resource<external>{%c524288} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c524288}
%6 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @torch_jit$async_dispatch_0::@torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%5[%c0 to %c524288 for %c524288], %0[%c0 to %c131584 for %c131584], %1[%c0 to %__constant_tensor_257xf32__size for %__constant_tensor_257xf32__size]) : (!stream.resource<*>{%c524288}, !stream.resource<*>{%c131584}, !stream.resource<*>{%__constant_tensor_257xf32__size}) -> !stream.resource<*>{%c1052672}
%result, %result_timepoint = stream.timepoint.barrier on(#hal.device.affinity<@__device_0>) %6 : !stream.resource<*>{%c1052672} => !stream.timepoint
stream.timepoint.chain_external on(#hal.device.affinity<@__device_0>) %result_timepoint => (%arg2 : !hal.fence)
%7 = stream.async.transfer %result : !stream.resource<*>{%c1052672} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c1052672}
%8 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<1x257x4x256xf32> in !stream.resource<external>{%c1052672} -> !hal.buffer_view
util.return %8 : !hal.buffer_view
}
// -----// IR Dump After FoldGlobalsPass (iree-util-fold-globals) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d2, d1)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d2)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
#map3 = affine_map<(d0, d1) -> (d0, d1)>
#map4 = affine_map<(d0, d1) -> (d0)>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @torch_jit$async_dispatch_0 {
stream.executable.export public @torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x1024xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<257x128xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<257xf32>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 1024], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x1024xf32>> -> tensor<128x1024xf32>
%5 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [257, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<257x128xf32>> -> tensor<257x128xf32>
%6 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [257], strides = [1] : !flow.dispatch.tensor<readonly:tensor<257xf32>> -> tensor<257xf32>
%7 = tensor.empty() : tensor<257x1024xf32>
%8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<257x1024xf32>) -> tensor<257x1024xf32>
%9 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%4, %5 : tensor<128x1024xf32>, tensor<257x128xf32>) outs(%8 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.mulf %in, %in_0 : f32
%12 = arith.addf %out, %11 : f32
linalg.yield %12 : f32
} -> tensor<257x1024xf32>
%10 = linalg.generic {indexing_maps = [#map3, #map4, #map3], iterator_types = ["parallel", "parallel"]} ins(%9, %6 : tensor<257x1024xf32>, tensor<257xf32>) outs(%7 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
linalg.yield %11 : f32
} -> tensor<257x1024xf32>
flow.dispatch.tensor.store %10, %3, offsets = [0, 0], sizes = [257, 1024], strides = [1, 1] : tensor<257x1024xf32> -> !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>
return
}
}
}
util.global private @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
util.initializer {
%c1028 = arith.constant 1028 : index
%c131584 = arith.constant 131584 : index
%cst = stream.async.constant on(#hal.device.affinity<@__device_0>) : !stream.resource<constant>{%c131584} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_0 = stream.async.constant on(#hal.device.affinity<@__device_0>) : !stream.resource<constant>{%c1028} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
util.global.store %cst, @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
util.global.store %cst_0, @__constant_tensor_257xf32 : !stream.resource<constant>
util.return
}
util.global private @__constant_tensor_257xf32 : !stream.resource<constant>
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%c1028 = arith.constant 1028 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c4 = arith.constant 4 : index
%c128 = arith.constant 128 : index
%c1 = arith.constant 1 : index
%c524288 = arith.constant 524288 : index
%c131584 = arith.constant 131584 : index
%c1052672 = arith.constant 1052672 : index
%__constant_tensor_257x128x1x1xf32 = util.global.load immutable @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
%__constant_tensor_257xf32 = util.global.load immutable @__constant_tensor_257xf32 : !stream.resource<constant>
%0 = stream.async.transfer %__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>{%c131584} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c131584}
%1 = stream.async.transfer %__constant_tensor_257xf32 : !stream.resource<constant>{%c1028} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c1028}
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("tensor") shape([%c1, %c128, %c4, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32> in !stream.resource<external>{%c524288}
%3 = stream.timepoint.import on(#hal.device.affinity<@__device_0>) %arg1 : (!hal.fence) => !stream.timepoint
%4 = stream.timepoint.await %3 => %2 : !stream.resource<external>{%c524288}
%5 = stream.async.transfer %4 : !stream.resource<external>{%c524288} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c524288}
%6 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @torch_jit$async_dispatch_0::@torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%5[%c0 to %c524288 for %c524288], %0[%c0 to %c131584 for %c131584], %1[%c0 to %c1028 for %c1028]) : (!stream.resource<*>{%c524288}, !stream.resource<*>{%c131584}, !stream.resource<*>{%c1028}) -> !stream.resource<*>{%c1052672}
%result, %result_timepoint = stream.timepoint.barrier on(#hal.device.affinity<@__device_0>) %6 : !stream.resource<*>{%c1052672} => !stream.timepoint
stream.timepoint.chain_external on(#hal.device.affinity<@__device_0>) %result_timepoint => (%arg2 : !hal.fence)
%7 = stream.async.transfer %result : !stream.resource<*>{%c1052672} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c1052672}
%8 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<1x257x4x256xf32> in !stream.resource<external>{%c1052672} -> !hal.buffer_view
util.return %8 : !hal.buffer_view
}
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
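// Note: iree-util-fold-globals dropped the @__constant_tensor_257x128x1x1xf32__size and
// @__constant_tensor_257xf32__size globals; since their values are the constants 131584 and
// 1028, @torch_jit$async now uses inline arith.constant indices in place of those global loads.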
// -----// IR Dump After FuseGlobalsPass (iree-util-fuse-globals) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d2, d1)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d2)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
#map3 = affine_map<(d0, d1) -> (d0, d1)>
#map4 = affine_map<(d0, d1) -> (d0)>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @torch_jit$async_dispatch_0 {
stream.executable.export public @torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x1024xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<257x128xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<257xf32>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 1024], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x1024xf32>> -> tensor<128x1024xf32>
%5 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [257, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<257x128xf32>> -> tensor<257x128xf32>
%6 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [257], strides = [1] : !flow.dispatch.tensor<readonly:tensor<257xf32>> -> tensor<257xf32>
%7 = tensor.empty() : tensor<257x1024xf32>
%8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<257x1024xf32>) -> tensor<257x1024xf32>
%9 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%4, %5 : tensor<128x1024xf32>, tensor<257x128xf32>) outs(%8 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.mulf %in, %in_0 : f32
%12 = arith.addf %out, %11 : f32
linalg.yield %12 : f32
} -> tensor<257x1024xf32>
%10 = linalg.generic {indexing_maps = [#map3, #map4, #map3], iterator_types = ["parallel", "parallel"]} ins(%9, %6 : tensor<257x1024xf32>, tensor<257xf32>) outs(%7 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
linalg.yield %11 : f32
} -> tensor<257x1024xf32>
flow.dispatch.tensor.store %10, %3, offsets = [0, 0], sizes = [257, 1024], strides = [1, 1] : tensor<257x1024xf32> -> !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>
return
}
}
}
util.global private @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
util.initializer {
%c1028 = arith.constant 1028 : index
%c131584 = arith.constant 131584 : index
%cst = stream.async.constant on(#hal.device.affinity<@__device_0>) : !stream.resource<constant>{%c131584} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_0 = stream.async.constant on(#hal.device.affinity<@__device_0>) : !stream.resource<constant>{%c1028} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
util.global.store %cst, @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
util.global.store %cst_0, @__constant_tensor_257xf32 : !stream.resource<constant>
util.return
}
util.global private @__constant_tensor_257xf32 : !stream.resource<constant>
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%c1028 = arith.constant 1028 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c4 = arith.constant 4 : index
%c128 = arith.constant 128 : index
%c1 = arith.constant 1 : index
%c524288 = arith.constant 524288 : index
%c131584 = arith.constant 131584 : index
%c1052672 = arith.constant 1052672 : index
%__constant_tensor_257x128x1x1xf32 = util.global.load immutable @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
%__constant_tensor_257xf32 = util.global.load immutable @__constant_tensor_257xf32 : !stream.resource<constant>
%0 = stream.async.transfer %__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>{%c131584} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c131584}
%1 = stream.async.transfer %__constant_tensor_257xf32 : !stream.resource<constant>{%c1028} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c1028}
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("tensor") shape([%c1, %c128, %c4, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32> in !stream.resource<external>{%c524288}
%3 = stream.timepoint.import on(#hal.device.affinity<@__device_0>) %arg1 : (!hal.fence) => !stream.timepoint
%4 = stream.timepoint.await %3 => %2 : !stream.resource<external>{%c524288}
%5 = stream.async.transfer %4 : !stream.resource<external>{%c524288} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c524288}
%6 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @torch_jit$async_dispatch_0::@torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%5[%c0 to %c524288 for %c524288], %0[%c0 to %c131584 for %c131584], %1[%c0 to %c1028 for %c1028]) : (!stream.resource<*>{%c524288}, !stream.resource<*>{%c131584}, !stream.resource<*>{%c1028}) -> !stream.resource<*>{%c1052672}
%result, %result_timepoint = stream.timepoint.barrier on(#hal.device.affinity<@__device_0>) %6 : !stream.resource<*>{%c1052672} => !stream.timepoint
stream.timepoint.chain_external on(#hal.device.affinity<@__device_0>) %result_timepoint => (%arg2 : !hal.fence)
%7 = stream.async.transfer %result : !stream.resource<*>{%c1052672} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c1052672}
%8 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<1x257x4x256xf32> in !stream.resource<external>{%c1052672} -> !hal.buffer_view
util.return %8 : !hal.buffer_view
}
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
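The dispatch body in the dump above (@torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32) expresses the 1x1 convolution as a matmul-like linalg.generic followed by a bias broadcast: out[d0, d1] accumulates in[d2, d1] * w[d0, d2] over d2, and the second generic adds bias[d0]. A minimal NumPy sketch of the same computation, assuming the reshaped operand shapes from the dispatch signature (activation 128x1024, weight 257x128, bias 257) and random placeholder data since the original constants are elided:

import numpy as np

x = np.random.rand(128, 1024).astype(np.float32)   # 1x128x4x256 activation, flattened
w = np.random.rand(257, 128).astype(np.float32)    # 257x128x1x1 weight, flattened
b = np.random.rand(257).astype(np.float32)         # 257-element bias

# First linalg.generic: out[d0, d1] = sum_d2 x[d2, d1] * w[d0, d2]
out = w @ x                                        # shape (257, 1024)
# Second linalg.generic: broadcast-add the bias along d0
out = out + b[:, None]                             # shape (257, 1024), i.e. 1x257x4x256 flattened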
// -----// IR Dump After IPOPass (iree-util-ipo) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d2, d1)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d2)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
#map3 = affine_map<(d0, d1) -> (d0, d1)>
#map4 = affine_map<(d0, d1) -> (d0)>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @torch_jit$async_dispatch_0 {
stream.executable.export public @torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x1024xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<257x128xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<257xf32>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 1024], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x1024xf32>> -> tensor<128x1024xf32>
%5 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [257, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<257x128xf32>> -> tensor<257x128xf32>
%6 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [257], strides = [1] : !flow.dispatch.tensor<readonly:tensor<257xf32>> -> tensor<257xf32>
%7 = tensor.empty() : tensor<257x1024xf32>
%8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<257x1024xf32>) -> tensor<257x1024xf32>
%9 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%4, %5 : tensor<128x1024xf32>, tensor<257x128xf32>) outs(%8 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.mulf %in, %in_0 : f32
%12 = arith.addf %out, %11 : f32
linalg.yield %12 : f32
} -> tensor<257x1024xf32>
%10 = linalg.generic {indexing_maps = [#map3, #map4, #map3], iterator_types = ["parallel", "parallel"]} ins(%9, %6 : tensor<257x1024xf32>, tensor<257xf32>) outs(%7 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
linalg.yield %11 : f32
} -> tensor<257x1024xf32>
flow.dispatch.tensor.store %10, %3, offsets = [0, 0], sizes = [257, 1024], strides = [1, 1] : tensor<257x1024xf32> -> !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>
return
}
}
}
util.global private @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
util.initializer {
%c1028 = arith.constant 1028 : index
%c131584 = arith.constant 131584 : index
%cst = stream.async.constant on(#hal.device.affinity<@__device_0>) : !stream.resource<constant>{%c131584} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_0 = stream.async.constant on(#hal.device.affinity<@__device_0>) : !stream.resource<constant>{%c1028} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
util.global.store %cst, @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
util.global.store %cst_0, @__constant_tensor_257xf32 : !stream.resource<constant>
util.return
}
util.global private @__constant_tensor_257xf32 : !stream.resource<constant>
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%c1028 = arith.constant 1028 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c4 = arith.constant 4 : index
%c128 = arith.constant 128 : index
%c1 = arith.constant 1 : index
%c524288 = arith.constant 524288 : index
%c131584 = arith.constant 131584 : index
%c1052672 = arith.constant 1052672 : index
%__constant_tensor_257x128x1x1xf32 = util.global.load immutable @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
%__constant_tensor_257xf32 = util.global.load immutable @__constant_tensor_257xf32 : !stream.resource<constant>
%0 = stream.async.transfer %__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>{%c131584} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c131584}
%1 = stream.async.transfer %__constant_tensor_257xf32 : !stream.resource<constant>{%c1028} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c1028}
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("tensor") shape([%c1, %c128, %c4, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32> in !stream.resource<external>{%c524288}
%3 = stream.timepoint.import on(#hal.device.affinity<@__device_0>) %arg1 : (!hal.fence) => !stream.timepoint
%4 = stream.timepoint.await %3 => %2 : !stream.resource<external>{%c524288}
%5 = stream.async.transfer %4 : !stream.resource<external>{%c524288} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c524288}
%6 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @torch_jit$async_dispatch_0::@torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%5[%c0 to %c524288 for %c524288], %0[%c0 to %c131584 for %c131584], %1[%c0 to %c1028 for %c1028]) : (!stream.resource<*>{%c524288}, !stream.resource<*>{%c131584}, !stream.resource<*>{%c1028}) -> !stream.resource<*>{%c1052672}
%result, %result_timepoint = stream.timepoint.barrier on(#hal.device.affinity<@__device_0>) %6 : !stream.resource<*>{%c1052672} => !stream.timepoint
stream.timepoint.chain_external on(#hal.device.affinity<@__device_0>) %result_timepoint => (%arg2 : !hal.fence)
%7 = stream.async.transfer %result : !stream.resource<*>{%c1052672} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c1052672}
%8 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<1x257x4x256xf32> in !stream.resource<external>{%c1052672} -> !hal.buffer_view
util.return %8 : !hal.buffer_view
}
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
// -----// IR Dump After VerifyLoweringToAsyncResourcesPass (iree-stream-verify-lowering-to-async-resources) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d2, d1)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d2)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
#map3 = affine_map<(d0, d1) -> (d0, d1)>
#map4 = affine_map<(d0, d1) -> (d0)>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @torch_jit$async_dispatch_0 {
stream.executable.export public @torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x1024xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<257x128xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<257xf32>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 1024], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x1024xf32>> -> tensor<128x1024xf32>
%5 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [257, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<257x128xf32>> -> tensor<257x128xf32>
%6 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [257], strides = [1] : !flow.dispatch.tensor<readonly:tensor<257xf32>> -> tensor<257xf32>
%7 = tensor.empty() : tensor<257x1024xf32>
%8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<257x1024xf32>) -> tensor<257x1024xf32>
%9 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%4, %5 : tensor<128x1024xf32>, tensor<257x128xf32>) outs(%8 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.mulf %in, %in_0 : f32
%12 = arith.addf %out, %11 : f32
linalg.yield %12 : f32
} -> tensor<257x1024xf32>
%10 = linalg.generic {indexing_maps = [#map3, #map4, #map3], iterator_types = ["parallel", "parallel"]} ins(%9, %6 : tensor<257x1024xf32>, tensor<257xf32>) outs(%7 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
linalg.yield %11 : f32
} -> tensor<257x1024xf32>
flow.dispatch.tensor.store %10, %3, offsets = [0, 0], sizes = [257, 1024], strides = [1, 1] : tensor<257x1024xf32> -> !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>
return
}
}
}
util.global private @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
util.initializer {
%c1028 = arith.constant 1028 : index
%c131584 = arith.constant 131584 : index
%cst = stream.async.constant on(#hal.device.affinity<@__device_0>) : !stream.resource<constant>{%c131584} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_0 = stream.async.constant on(#hal.device.affinity<@__device_0>) : !stream.resource<constant>{%c1028} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
util.global.store %cst, @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
util.global.store %cst_0, @__constant_tensor_257xf32 : !stream.resource<constant>
util.return
}
util.global private @__constant_tensor_257xf32 : !stream.resource<constant>
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%c1028 = arith.constant 1028 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c4 = arith.constant 4 : index
%c128 = arith.constant 128 : index
%c1 = arith.constant 1 : index
%c524288 = arith.constant 524288 : index
%c131584 = arith.constant 131584 : index
%c1052672 = arith.constant 1052672 : index
%__constant_tensor_257x128x1x1xf32 = util.global.load immutable @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
%__constant_tensor_257xf32 = util.global.load immutable @__constant_tensor_257xf32 : !stream.resource<constant>
%0 = stream.async.transfer %__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>{%c131584} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c131584}
%1 = stream.async.transfer %__constant_tensor_257xf32 : !stream.resource<constant>{%c1028} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c1028}
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("tensor") shape([%c1, %c128, %c4, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32> in !stream.resource<external>{%c524288}
%3 = stream.timepoint.import on(#hal.device.affinity<@__device_0>) %arg1 : (!hal.fence) => !stream.timepoint
%4 = stream.timepoint.await %3 => %2 : !stream.resource<external>{%c524288}
%5 = stream.async.transfer %4 : !stream.resource<external>{%c524288} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c524288}
%6 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @torch_jit$async_dispatch_0::@torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%5[%c0 to %c524288 for %c524288], %0[%c0 to %c131584 for %c131584], %1[%c0 to %c1028 for %c1028]) : (!stream.resource<*>{%c524288}, !stream.resource<*>{%c131584}, !stream.resource<*>{%c1028}) -> !stream.resource<*>{%c1052672}
%result, %result_timepoint = stream.timepoint.barrier on(#hal.device.affinity<@__device_0>) %6 : !stream.resource<*>{%c1052672} => !stream.timepoint
stream.timepoint.chain_external on(#hal.device.affinity<@__device_0>) %result_timepoint => (%arg2 : !hal.fence)
%7 = stream.async.transfer %result : !stream.resource<*>{%c1052672} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c1052672}
%8 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<1x257x4x256xf32> in !stream.resource<external>{%c1052672} -> !hal.buffer_view
util.return %8 : !hal.buffer_view
}
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
// -----// IR Dump After MaterializeCopyOnWritePass (iree-stream-materialize-copy-on-write) //----- //
util.initializer {
%c1028 = arith.constant 1028 : index
%c131584 = arith.constant 131584 : index
%cst = stream.async.constant on(#hal.device.affinity<@__device_0>) : !stream.resource<constant>{%c131584} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_0 = stream.async.constant on(#hal.device.affinity<@__device_0>) : !stream.resource<constant>{%c1028} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
util.global.store %cst, @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
util.global.store %cst_0, @__constant_tensor_257xf32 : !stream.resource<constant>
util.return
}
// -----// IR Dump After MaterializeCopyOnWritePass (iree-stream-materialize-copy-on-write) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After MaterializeCopyOnWritePass (iree-stream-materialize-copy-on-write) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%c1028 = arith.constant 1028 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c4 = arith.constant 4 : index
%c128 = arith.constant 128 : index
%c1 = arith.constant 1 : index
%c524288 = arith.constant 524288 : index
%c131584 = arith.constant 131584 : index
%c1052672 = arith.constant 1052672 : index
%__constant_tensor_257x128x1x1xf32 = util.global.load immutable @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
%__constant_tensor_257xf32 = util.global.load immutable @__constant_tensor_257xf32 : !stream.resource<constant>
%0 = stream.async.transfer %__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>{%c131584} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c131584}
%1 = stream.async.transfer %__constant_tensor_257xf32 : !stream.resource<constant>{%c1028} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c1028}
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("tensor") shape([%c1, %c128, %c4, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32> in !stream.resource<external>{%c524288}
%3 = stream.timepoint.import on(#hal.device.affinity<@__device_0>) %arg1 : (!hal.fence) => !stream.timepoint
%4 = stream.timepoint.await %3 => %2 : !stream.resource<external>{%c524288}
%5 = stream.async.transfer %4 : !stream.resource<external>{%c524288} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c524288}
%6 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @torch_jit$async_dispatch_0::@torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%5[%c0 to %c524288 for %c524288], %0[%c0 to %c131584 for %c131584], %1[%c0 to %c1028 for %c1028]) : (!stream.resource<*>{%c524288}, !stream.resource<*>{%c131584}, !stream.resource<*>{%c1028}) -> !stream.resource<*>{%c1052672}
%result, %result_timepoint = stream.timepoint.barrier on(#hal.device.affinity<@__device_0>) %6 : !stream.resource<*>{%c1052672} => !stream.timepoint
stream.timepoint.chain_external on(#hal.device.affinity<@__device_0>) %result_timepoint => (%arg2 : !hal.fence)
%7 = stream.async.transfer %result : !stream.resource<*>{%c1052672} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c1052672}
%8 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<1x257x4x256xf32> in !stream.resource<external>{%c1052672} -> !hal.buffer_view
util.return %8 : !hal.buffer_view
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.initializer {
%c1028 = arith.constant 1028 : index
%c131584 = arith.constant 131584 : index
%cst = stream.async.constant on(#hal.device.affinity<@__device_0>) : !stream.resource<constant>{%c131584} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_0 = stream.async.constant on(#hal.device.affinity<@__device_0>) : !stream.resource<constant>{%c1028} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
util.global.store %cst, @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
util.global.store %cst_0, @__constant_tensor_257xf32 : !stream.resource<constant>
util.return
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%c1028 = arith.constant 1028 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c4 = arith.constant 4 : index
%c128 = arith.constant 128 : index
%c1 = arith.constant 1 : index
%c524288 = arith.constant 524288 : index
%c131584 = arith.constant 131584 : index
%c1052672 = arith.constant 1052672 : index
%__constant_tensor_257x128x1x1xf32 = util.global.load immutable @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
%__constant_tensor_257xf32 = util.global.load immutable @__constant_tensor_257xf32 : !stream.resource<constant>
%0 = stream.async.transfer %__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>{%c131584} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c131584}
%1 = stream.async.transfer %__constant_tensor_257xf32 : !stream.resource<constant>{%c1028} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c1028}
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("tensor") shape([%c1, %c128, %c4, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32> in !stream.resource<external>{%c524288}
%3 = stream.timepoint.import on(#hal.device.affinity<@__device_0>) %arg1 : (!hal.fence) => !stream.timepoint
%4 = stream.timepoint.await %3 => %2 : !stream.resource<external>{%c524288}
%5 = stream.async.transfer %4 : !stream.resource<external>{%c524288} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c524288}
%6 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @torch_jit$async_dispatch_0::@torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%5[%c0 to %c524288 for %c524288], %0[%c0 to %c131584 for %c131584], %1[%c0 to %c1028 for %c1028]) : (!stream.resource<*>{%c524288}, !stream.resource<*>{%c131584}, !stream.resource<*>{%c1028}) -> !stream.resource<*>{%c1052672}
%result, %result_timepoint = stream.timepoint.barrier on(#hal.device.affinity<@__device_0>) %6 : !stream.resource<*>{%c1052672} => !stream.timepoint
stream.timepoint.chain_external on(#hal.device.affinity<@__device_0>) %result_timepoint => (%arg2 : !hal.fence)
%7 = stream.async.transfer %result : !stream.resource<*>{%c1052672} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c1052672}
%8 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<1x257x4x256xf32> in !stream.resource<external>{%c1052672} -> !hal.buffer_view
util.return %8 : !hal.buffer_view
}
// -----// IR Dump After ElideAsyncCopiesPass (iree-stream-elide-async-copies) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d2, d1)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d2)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
#map3 = affine_map<(d0, d1) -> (d0, d1)>
#map4 = affine_map<(d0, d1) -> (d0)>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @torch_jit$async_dispatch_0 {
stream.executable.export public @torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x1024xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<257x128xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<257xf32>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 1024], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x1024xf32>> -> tensor<128x1024xf32>
%5 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [257, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<257x128xf32>> -> tensor<257x128xf32>
%6 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [257], strides = [1] : !flow.dispatch.tensor<readonly:tensor<257xf32>> -> tensor<257xf32>
%7 = tensor.empty() : tensor<257x1024xf32>
%8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<257x1024xf32>) -> tensor<257x1024xf32>
%9 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%4, %5 : tensor<128x1024xf32>, tensor<257x128xf32>) outs(%8 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.mulf %in, %in_0 : f32
%12 = arith.addf %out, %11 : f32
linalg.yield %12 : f32
} -> tensor<257x1024xf32>
%10 = linalg.generic {indexing_maps = [#map3, #map4, #map3], iterator_types = ["parallel", "parallel"]} ins(%9, %6 : tensor<257x1024xf32>, tensor<257xf32>) outs(%7 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
linalg.yield %11 : f32
} -> tensor<257x1024xf32>
flow.dispatch.tensor.store %10, %3, offsets = [0, 0], sizes = [257, 1024], strides = [1, 1] : tensor<257x1024xf32> -> !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>
return
}
}
}
util.global private @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
util.initializer {
%c1028 = arith.constant 1028 : index
%c131584 = arith.constant 131584 : index
%cst = stream.async.constant on(#hal.device.affinity<@__device_0>) : !stream.resource<constant>{%c131584} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_0 = stream.async.constant on(#hal.device.affinity<@__device_0>) : !stream.resource<constant>{%c1028} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
util.global.store %cst, @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
util.global.store %cst_0, @__constant_tensor_257xf32 : !stream.resource<constant>
util.return
}
util.global private @__constant_tensor_257xf32 : !stream.resource<constant>
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%c1028 = arith.constant 1028 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c4 = arith.constant 4 : index
%c128 = arith.constant 128 : index
%c1 = arith.constant 1 : index
%c524288 = arith.constant 524288 : index
%c131584 = arith.constant 131584 : index
%c1052672 = arith.constant 1052672 : index
%__constant_tensor_257x128x1x1xf32 = util.global.load immutable @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
%__constant_tensor_257xf32 = util.global.load immutable @__constant_tensor_257xf32 : !stream.resource<constant>
%0 = stream.async.transfer %__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>{%c131584} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c131584}
%1 = stream.async.transfer %__constant_tensor_257xf32 : !stream.resource<constant>{%c1028} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c1028}
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("tensor") shape([%c1, %c128, %c4, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32> in !stream.resource<external>{%c524288}
%3 = stream.timepoint.import on(#hal.device.affinity<@__device_0>) %arg1 : (!hal.fence) => !stream.timepoint
%4 = stream.timepoint.await %3 => %2 : !stream.resource<external>{%c524288}
%5 = stream.async.transfer %4 : !stream.resource<external>{%c524288} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c524288}
%6 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @torch_jit$async_dispatch_0::@torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%5[%c0 to %c524288 for %c524288], %0[%c0 to %c131584 for %c131584], %1[%c0 to %c1028 for %c1028]) : (!stream.resource<*>{%c524288}, !stream.resource<*>{%c131584}, !stream.resource<*>{%c1028}) -> !stream.resource<*>{%c1052672}
%result, %result_timepoint = stream.timepoint.barrier on(#hal.device.affinity<@__device_0>) %6 : !stream.resource<*>{%c1052672} => !stream.timepoint
stream.timepoint.chain_external on(#hal.device.affinity<@__device_0>) %result_timepoint => (%arg2 : !hal.fence)
%7 = stream.async.transfer %result : !stream.resource<*>{%c1052672} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c1052672}
%8 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<1x257x4x256xf32> in !stream.resource<external>{%c1052672} -> !hal.buffer_view
util.return %8 : !hal.buffer_view
}
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After EmplaceAllocationsPass (iree-stream-emplace-allocations) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.initializer {
%c1028 = arith.constant 1028 : index
%c131584 = arith.constant 131584 : index
%cst = stream.async.constant on(#hal.device.affinity<@__device_0>) : !stream.resource<constant>{%c131584} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_0 = stream.async.constant on(#hal.device.affinity<@__device_0>) : !stream.resource<constant>{%c1028} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
util.global.store %cst, @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
util.global.store %cst_0, @__constant_tensor_257xf32 : !stream.resource<constant>
util.return
}
// -----// IR Dump After EmplaceAllocationsPass (iree-stream-emplace-allocations) //----- //
util.initializer {
%c1028 = arith.constant 1028 : index
%c131584 = arith.constant 131584 : index
%cst = stream.async.constant on(#hal.device.affinity<@__device_0>) : !stream.resource<constant>{%c131584} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_0 = stream.async.constant on(#hal.device.affinity<@__device_0>) : !stream.resource<constant>{%c1028} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
util.global.store %cst, @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
util.global.store %cst_0, @__constant_tensor_257xf32 : !stream.resource<constant>
util.return
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%c1028 = arith.constant 1028 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c4 = arith.constant 4 : index
%c128 = arith.constant 128 : index
%c1 = arith.constant 1 : index
%c524288 = arith.constant 524288 : index
%c131584 = arith.constant 131584 : index
%c1052672 = arith.constant 1052672 : index
%__constant_tensor_257x128x1x1xf32 = util.global.load immutable @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
%__constant_tensor_257xf32 = util.global.load immutable @__constant_tensor_257xf32 : !stream.resource<constant>
%0 = stream.async.transfer %__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>{%c131584} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c131584}
%1 = stream.async.transfer %__constant_tensor_257xf32 : !stream.resource<constant>{%c1028} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c1028}
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("tensor") shape([%c1, %c128, %c4, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32> in !stream.resource<external>{%c524288}
%3 = stream.timepoint.import on(#hal.device.affinity<@__device_0>) %arg1 : (!hal.fence) => !stream.timepoint
%4 = stream.timepoint.await %3 => %2 : !stream.resource<external>{%c524288}
%5 = stream.async.transfer %4 : !stream.resource<external>{%c524288} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c524288}
%6 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @torch_jit$async_dispatch_0::@torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%5[%c0 to %c524288 for %c524288], %0[%c0 to %c131584 for %c131584], %1[%c0 to %c1028 for %c1028]) : (!stream.resource<*>{%c524288}, !stream.resource<*>{%c131584}, !stream.resource<*>{%c1028}) -> !stream.resource<*>{%c1052672}
%result, %result_timepoint = stream.timepoint.barrier on(#hal.device.affinity<@__device_0>) %6 : !stream.resource<*>{%c1052672} => !stream.timepoint
stream.timepoint.chain_external on(#hal.device.affinity<@__device_0>) %result_timepoint => (%arg2 : !hal.fence)
%7 = stream.async.transfer %result : !stream.resource<*>{%c1052672} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c1052672}
%8 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<1x257x4x256xf32> in !stream.resource<external>{%c1052672} -> !hal.buffer_view
util.return %8 : !hal.buffer_view
}
// -----// IR Dump After EmplaceAllocationsPass (iree-stream-emplace-allocations) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%c1028 = arith.constant 1028 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c4 = arith.constant 4 : index
%c128 = arith.constant 128 : index
%c1 = arith.constant 1 : index
%c524288 = arith.constant 524288 : index
%c131584 = arith.constant 131584 : index
%c1052672 = arith.constant 1052672 : index
%__constant_tensor_257x128x1x1xf32 = util.global.load immutable @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
%__constant_tensor_257xf32 = util.global.load immutable @__constant_tensor_257xf32 : !stream.resource<constant>
%0 = stream.async.transfer %__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>{%c131584} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c131584}
%1 = stream.async.transfer %__constant_tensor_257xf32 : !stream.resource<constant>{%c1028} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c1028}
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("tensor") shape([%c1, %c128, %c4, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32> in !stream.resource<external>{%c524288}
%3 = stream.timepoint.import on(#hal.device.affinity<@__device_0>) %arg1 : (!hal.fence) => !stream.timepoint
%4 = stream.timepoint.await %3 => %2 : !stream.resource<external>{%c524288}
%5 = stream.async.transfer %4 : !stream.resource<external>{%c524288} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c524288}
%6 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @torch_jit$async_dispatch_0::@torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%5[%c0 to %c524288 for %c524288], %0[%c0 to %c131584 for %c131584], %1[%c0 to %c1028 for %c1028]) : (!stream.resource<*>{%c524288}, !stream.resource<*>{%c131584}, !stream.resource<*>{%c1028}) -> !stream.resource<*>{%c1052672}
%result, %result_timepoint = stream.timepoint.barrier on(#hal.device.affinity<@__device_0>) %6 : !stream.resource<*>{%c1052672} => !stream.timepoint
stream.timepoint.chain_external on(#hal.device.affinity<@__device_0>) %result_timepoint => (%arg2 : !hal.fence)
%7 = stream.async.transfer %result : !stream.resource<*>{%c1052672} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c1052672}
%8 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<1x257x4x256xf32> in !stream.resource<external>{%c1052672} -> !hal.buffer_view
util.return %8 : !hal.buffer_view
}
// -----// IR Dump After RefineUsagePass (iree-stream-refine-usage) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d2, d1)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d2)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
#map3 = affine_map<(d0, d1) -> (d0, d1)>
#map4 = affine_map<(d0, d1) -> (d0)>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @torch_jit$async_dispatch_0 {
stream.executable.export public @torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x1024xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<257x128xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<257xf32>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 1024], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x1024xf32>> -> tensor<128x1024xf32>
%5 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [257, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<257x128xf32>> -> tensor<257x128xf32>
%6 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [257], strides = [1] : !flow.dispatch.tensor<readonly:tensor<257xf32>> -> tensor<257xf32>
%7 = tensor.empty() : tensor<257x1024xf32>
%8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<257x1024xf32>) -> tensor<257x1024xf32>
%9 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%4, %5 : tensor<128x1024xf32>, tensor<257x128xf32>) outs(%8 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.mulf %in, %in_0 : f32
%12 = arith.addf %out, %11 : f32
linalg.yield %12 : f32
} -> tensor<257x1024xf32>
%10 = linalg.generic {indexing_maps = [#map3, #map4, #map3], iterator_types = ["parallel", "parallel"]} ins(%9, %6 : tensor<257x1024xf32>, tensor<257xf32>) outs(%7 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
linalg.yield %11 : f32
} -> tensor<257x1024xf32>
flow.dispatch.tensor.store %10, %3, offsets = [0, 0], sizes = [257, 1024], strides = [1, 1] : tensor<257x1024xf32> -> !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>
return
}
}
}
util.global private @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
util.initializer {
%c1028 = arith.constant 1028 : index
%c131584 = arith.constant 131584 : index
%cst = stream.async.constant on(#hal.device.affinity<@__device_0>) : !stream.resource<constant>{%c131584} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_0 = stream.async.constant on(#hal.device.affinity<@__device_0>) : !stream.resource<constant>{%c1028} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
util.global.store %cst, @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
util.global.store %cst_0, @__constant_tensor_257xf32 : !stream.resource<constant>
util.return
}
util.global private @__constant_tensor_257xf32 : !stream.resource<constant>
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%c1028 = arith.constant 1028 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c4 = arith.constant 4 : index
%c128 = arith.constant 128 : index
%c1 = arith.constant 1 : index
%c524288 = arith.constant 524288 : index
%c131584 = arith.constant 131584 : index
%c1052672 = arith.constant 1052672 : index
%__constant_tensor_257x128x1x1xf32 = util.global.load immutable @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
%__constant_tensor_257xf32 = util.global.load immutable @__constant_tensor_257xf32 : !stream.resource<constant>
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("tensor") shape([%c1, %c128, %c4, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32> in !stream.resource<external>{%c524288}
%1 = stream.timepoint.import on(#hal.device.affinity<@__device_0>) %arg1 : (!hal.fence) => !stream.timepoint
%2 = stream.timepoint.await %1 => %0 : !stream.resource<external>{%c524288}
%3 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @torch_jit$async_dispatch_0::@torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%2[%c0 to %c524288 for %c524288], %__constant_tensor_257x128x1x1xf32[%c0 to %c131584 for %c131584], %__constant_tensor_257xf32[%c0 to %c1028 for %c1028]) : (!stream.resource<external>{%c524288}, !stream.resource<constant>{%c131584}, !stream.resource<constant>{%c1028}) -> !stream.resource<external>{%c1052672}
%result, %result_timepoint = stream.timepoint.barrier on(#hal.device.affinity<@__device_0>) %3 : !stream.resource<external>{%c1052672} => !stream.timepoint
stream.timepoint.chain_external on(#hal.device.affinity<@__device_0>) %result_timepoint => (%arg2 : !hal.fence)
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %result : tensor<1x257x4x256xf32> in !stream.resource<external>{%c1052672} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%c1028 = arith.constant 1028 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c4 = arith.constant 4 : index
%c128 = arith.constant 128 : index
%c1 = arith.constant 1 : index
%c524288 = arith.constant 524288 : index
%c131584 = arith.constant 131584 : index
%c1052672 = arith.constant 1052672 : index
%__constant_tensor_257x128x1x1xf32 = util.global.load immutable @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
%__constant_tensor_257xf32 = util.global.load immutable @__constant_tensor_257xf32 : !stream.resource<constant>
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("tensor") shape([%c1, %c128, %c4, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32> in !stream.resource<external>{%c524288}
%1 = stream.timepoint.import on(#hal.device.affinity<@__device_0>) %arg1 : (!hal.fence) => !stream.timepoint
%2 = stream.timepoint.await %1 => %0 : !stream.resource<external>{%c524288}
%3 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @torch_jit$async_dispatch_0::@torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%2[%c0 to %c524288 for %c524288], %__constant_tensor_257x128x1x1xf32[%c0 to %c131584 for %c131584], %__constant_tensor_257xf32[%c0 to %c1028 for %c1028]) : (!stream.resource<external>{%c524288}, !stream.resource<constant>{%c131584}, !stream.resource<constant>{%c1028}) -> !stream.resource<external>{%c1052672}
%result, %result_timepoint = stream.timepoint.barrier on(#hal.device.affinity<@__device_0>) %3 : !stream.resource<external>{%c1052672} => !stream.timepoint
stream.timepoint.chain_external on(#hal.device.affinity<@__device_0>) %result_timepoint => (%arg2 : !hal.fence)
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %result : tensor<1x257x4x256xf32> in !stream.resource<external>{%c1052672} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.initializer {
%c1028 = arith.constant 1028 : index
%c131584 = arith.constant 131584 : index
%cst = stream.async.constant on(#hal.device.affinity<@__device_0>) : !stream.resource<constant>{%c131584} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_0 = stream.async.constant on(#hal.device.affinity<@__device_0>) : !stream.resource<constant>{%c1028} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
util.global.store %cst, @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
util.global.store %cst_0, @__constant_tensor_257xf32 : !stream.resource<constant>
util.return
}
// -----// IR Dump After OptimizeIntArithmeticPass (iree-util-optimize-int-arithmetic) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%c1028 = arith.constant 1028 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c4 = arith.constant 4 : index
%c128 = arith.constant 128 : index
%c1 = arith.constant 1 : index
%c524288 = arith.constant 524288 : index
%c131584 = arith.constant 131584 : index
%c1052672 = arith.constant 1052672 : index
%__constant_tensor_257x128x1x1xf32 = util.global.load immutable @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
%__constant_tensor_257xf32 = util.global.load immutable @__constant_tensor_257xf32 : !stream.resource<constant>
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("tensor") shape([%c1, %c128, %c4, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32> in !stream.resource<external>{%c524288}
%1 = stream.timepoint.import on(#hal.device.affinity<@__device_0>) %arg1 : (!hal.fence) => !stream.timepoint
%2 = stream.timepoint.await %1 => %0 : !stream.resource<external>{%c524288}
%3 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @torch_jit$async_dispatch_0::@torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%2[%c0 to %c524288 for %c524288], %__constant_tensor_257x128x1x1xf32[%c0 to %c131584 for %c131584], %__constant_tensor_257xf32[%c0 to %c1028 for %c1028]) : (!stream.resource<external>{%c524288}, !stream.resource<constant>{%c131584}, !stream.resource<constant>{%c1028}) -> !stream.resource<external>{%c1052672}
%result, %result_timepoint = stream.timepoint.barrier on(#hal.device.affinity<@__device_0>) %3 : !stream.resource<external>{%c1052672} => !stream.timepoint
stream.timepoint.chain_external on(#hal.device.affinity<@__device_0>) %result_timepoint => (%arg2 : !hal.fence)
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %result : tensor<1x257x4x256xf32> in !stream.resource<external>{%c1052672} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.initializer {
%c1028 = arith.constant 1028 : index
%c131584 = arith.constant 131584 : index
%cst = stream.async.constant on(#hal.device.affinity<@__device_0>) : !stream.resource<constant>{%c131584} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_0 = stream.async.constant on(#hal.device.affinity<@__device_0>) : !stream.resource<constant>{%c1028} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
util.global.store %cst, @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
util.global.store %cst_0, @__constant_tensor_257xf32 : !stream.resource<constant>
util.return
}
// -----// IR Dump After SimplifyGlobalAccessesPass (iree-util-simplify-global-accesses) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After OptimizeIntArithmeticPass (iree-util-optimize-int-arithmetic) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%c1028 = arith.constant 1028 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c4 = arith.constant 4 : index
%c128 = arith.constant 128 : index
%c1 = arith.constant 1 : index
%c524288 = arith.constant 524288 : index
%c131584 = arith.constant 131584 : index
%c1052672 = arith.constant 1052672 : index
%__constant_tensor_257x128x1x1xf32 = util.global.load immutable @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
%__constant_tensor_257xf32 = util.global.load immutable @__constant_tensor_257xf32 : !stream.resource<constant>
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("tensor") shape([%c1, %c128, %c4, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32> in !stream.resource<external>{%c524288}
%1 = stream.timepoint.import on(#hal.device.affinity<@__device_0>) %arg1 : (!hal.fence) => !stream.timepoint
%2 = stream.timepoint.await %1 => %0 : !stream.resource<external>{%c524288}
%3 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @torch_jit$async_dispatch_0::@torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%2[%c0 to %c524288 for %c524288], %__constant_tensor_257x128x1x1xf32[%c0 to %c131584 for %c131584], %__constant_tensor_257xf32[%c0 to %c1028 for %c1028]) : (!stream.resource<external>{%c524288}, !stream.resource<constant>{%c131584}, !stream.resource<constant>{%c1028}) -> !stream.resource<external>{%c1052672}
%result, %result_timepoint = stream.timepoint.barrier on(#hal.device.affinity<@__device_0>) %3 : !stream.resource<external>{%c1052672} => !stream.timepoint
stream.timepoint.chain_external on(#hal.device.affinity<@__device_0>) %result_timepoint => (%arg2 : !hal.fence)
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %result : tensor<1x257x4x256xf32> in !stream.resource<external>{%c1052672} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After OptimizeIntArithmeticPass (iree-util-optimize-int-arithmetic) //----- //
util.initializer {
%c1028 = arith.constant 1028 : index
%c131584 = arith.constant 131584 : index
%cst = stream.async.constant on(#hal.device.affinity<@__device_0>) : !stream.resource<constant>{%c131584} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_0 = stream.async.constant on(#hal.device.affinity<@__device_0>) : !stream.resource<constant>{%c1028} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
util.global.store %cst, @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
util.global.store %cst_0, @__constant_tensor_257xf32 : !stream.resource<constant>
util.return
}
// -----// IR Dump After ApplyPatternsPass (iree-util-apply-patterns) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After SimplifyGlobalAccessesPass (iree-util-simplify-global-accesses) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%__constant_tensor_257x128x1x1xf32 = util.global.load immutable @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
%__constant_tensor_257xf32 = util.global.load immutable @__constant_tensor_257xf32 : !stream.resource<constant>
%c1028 = arith.constant 1028 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c4 = arith.constant 4 : index
%c128 = arith.constant 128 : index
%c1 = arith.constant 1 : index
%c524288 = arith.constant 524288 : index
%c131584 = arith.constant 131584 : index
%c1052672 = arith.constant 1052672 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("tensor") shape([%c1, %c128, %c4, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32> in !stream.resource<external>{%c524288}
%1 = stream.timepoint.import on(#hal.device.affinity<@__device_0>) %arg1 : (!hal.fence) => !stream.timepoint
%2 = stream.timepoint.await %1 => %0 : !stream.resource<external>{%c524288}
%3 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @torch_jit$async_dispatch_0::@torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%2[%c0 to %c524288 for %c524288], %__constant_tensor_257x128x1x1xf32[%c0 to %c131584 for %c131584], %__constant_tensor_257xf32[%c0 to %c1028 for %c1028]) : (!stream.resource<external>{%c524288}, !stream.resource<constant>{%c131584}, !stream.resource<constant>{%c1028}) -> !stream.resource<external>{%c1052672}
%result, %result_timepoint = stream.timepoint.barrier on(#hal.device.affinity<@__device_0>) %3 : !stream.resource<external>{%c1052672} => !stream.timepoint
stream.timepoint.chain_external on(#hal.device.affinity<@__device_0>) %result_timepoint => (%arg2 : !hal.fence)
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %result : tensor<1x257x4x256xf32> in !stream.resource<external>{%c1052672} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After SimplifyGlobalAccessesPass (iree-util-simplify-global-accesses) //----- //
util.initializer {
%c1028 = arith.constant 1028 : index
%c131584 = arith.constant 131584 : index
%cst = stream.async.constant on(#hal.device.affinity<@__device_0>) : !stream.resource<constant>{%c131584} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_0 = stream.async.constant on(#hal.device.affinity<@__device_0>) : !stream.resource<constant>{%c1028} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
util.global.store %cst, @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
util.global.store %cst_0, @__constant_tensor_257xf32 : !stream.resource<constant>
util.return
}
// -----// IR Dump After ApplyPatternsPass (iree-util-apply-patterns) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%c1052672 = arith.constant 1052672 : index
%c131584 = arith.constant 131584 : index
%c524288 = arith.constant 524288 : index
%c1 = arith.constant 1 : index
%c128 = arith.constant 128 : index
%c4 = arith.constant 4 : index
%c256 = arith.constant 256 : index
%c0 = arith.constant 0 : index
%c1028 = arith.constant 1028 : index
%__constant_tensor_257x128x1x1xf32 = util.global.load immutable @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
%__constant_tensor_257xf32 = util.global.load immutable @__constant_tensor_257xf32 : !stream.resource<constant>
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("tensor") shape([%c1, %c128, %c4, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32> in !stream.resource<external>{%c524288}
%1 = stream.timepoint.import on(#hal.device.affinity<@__device_0>) %arg1 : (!hal.fence) => !stream.timepoint
%2 = stream.timepoint.await %1 => %0 : !stream.resource<external>{%c524288}
%3 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @torch_jit$async_dispatch_0::@torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%2[%c0 to %c524288 for %c524288], %__constant_tensor_257x128x1x1xf32[%c0 to %c131584 for %c131584], %__constant_tensor_257xf32[%c0 to %c1028 for %c1028]) : (!stream.resource<external>{%c524288}, !stream.resource<constant>{%c131584}, !stream.resource<constant>{%c1028}) -> !stream.resource<external>{%c1052672}
%result, %result_timepoint = stream.timepoint.barrier on(#hal.device.affinity<@__device_0>) %3 : !stream.resource<external>{%c1052672} => !stream.timepoint
stream.timepoint.chain_external on(#hal.device.affinity<@__device_0>) %result_timepoint => (%arg2 : !hal.fence)
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %result : tensor<1x257x4x256xf32> in !stream.resource<external>{%c1052672} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatternsPass (iree-util-apply-patterns) //----- //
util.initializer {
%c1028 = arith.constant 1028 : index
%c131584 = arith.constant 131584 : index
%cst = stream.async.constant on(#hal.device.affinity<@__device_0>) : !stream.resource<constant>{%c131584} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_0 = stream.async.constant on(#hal.device.affinity<@__device_0>) : !stream.resource<constant>{%c1028} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
util.global.store %cst, @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
util.global.store %cst_0, @__constant_tensor_257xf32 : !stream.resource<constant>
util.return
}
// -----// IR Dump After FoldGlobalsPass (iree-util-fold-globals) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d2, d1)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d2)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
#map3 = affine_map<(d0, d1) -> (d0, d1)>
#map4 = affine_map<(d0, d1) -> (d0)>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @torch_jit$async_dispatch_0 {
stream.executable.export public @torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x1024xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<257x128xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<257xf32>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 1024], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x1024xf32>> -> tensor<128x1024xf32>
%5 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [257, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<257x128xf32>> -> tensor<257x128xf32>
%6 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [257], strides = [1] : !flow.dispatch.tensor<readonly:tensor<257xf32>> -> tensor<257xf32>
%7 = tensor.empty() : tensor<257x1024xf32>
%8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<257x1024xf32>) -> tensor<257x1024xf32>
%9 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%4, %5 : tensor<128x1024xf32>, tensor<257x128xf32>) outs(%8 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.mulf %in, %in_0 : f32
%12 = arith.addf %out, %11 : f32
linalg.yield %12 : f32
} -> tensor<257x1024xf32>
%10 = linalg.generic {indexing_maps = [#map3, #map4, #map3], iterator_types = ["parallel", "parallel"]} ins(%9, %6 : tensor<257x1024xf32>, tensor<257xf32>) outs(%7 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
linalg.yield %11 : f32
} -> tensor<257x1024xf32>
flow.dispatch.tensor.store %10, %3, offsets = [0, 0], sizes = [257, 1024], strides = [1, 1] : tensor<257x1024xf32> -> !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>
return
}
}
}
util.global private @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
util.initializer {
%c1028 = arith.constant 1028 : index
%c131584 = arith.constant 131584 : index
%cst = stream.async.constant on(#hal.device.affinity<@__device_0>) : !stream.resource<constant>{%c131584} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_0 = stream.async.constant on(#hal.device.affinity<@__device_0>) : !stream.resource<constant>{%c1028} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
util.global.store %cst, @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
util.global.store %cst_0, @__constant_tensor_257xf32 : !stream.resource<constant>
util.return
}
util.global private @__constant_tensor_257xf32 : !stream.resource<constant>
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%c1052672 = arith.constant 1052672 : index
%c131584 = arith.constant 131584 : index
%c524288 = arith.constant 524288 : index
%c1 = arith.constant 1 : index
%c128 = arith.constant 128 : index
%c4 = arith.constant 4 : index
%c256 = arith.constant 256 : index
%c0 = arith.constant 0 : index
%c1028 = arith.constant 1028 : index
%__constant_tensor_257x128x1x1xf32 = util.global.load immutable @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
%__constant_tensor_257xf32 = util.global.load immutable @__constant_tensor_257xf32 : !stream.resource<constant>
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("tensor") shape([%c1, %c128, %c4, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32> in !stream.resource<external>{%c524288}
%1 = stream.timepoint.import on(#hal.device.affinity<@__device_0>) %arg1 : (!hal.fence) => !stream.timepoint
%2 = stream.timepoint.await %1 => %0 : !stream.resource<external>{%c524288}
%3 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @torch_jit$async_dispatch_0::@torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%2[%c0 to %c524288 for %c524288], %__constant_tensor_257x128x1x1xf32[%c0 to %c131584 for %c131584], %__constant_tensor_257xf32[%c0 to %c1028 for %c1028]) : (!stream.resource<external>{%c524288}, !stream.resource<constant>{%c131584}, !stream.resource<constant>{%c1028}) -> !stream.resource<external>{%c1052672}
%result, %result_timepoint = stream.timepoint.barrier on(#hal.device.affinity<@__device_0>) %3 : !stream.resource<external>{%c1052672} => !stream.timepoint
stream.timepoint.chain_external on(#hal.device.affinity<@__device_0>) %result_timepoint => (%arg2 : !hal.fence)
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %result : tensor<1x257x4x256xf32> in !stream.resource<external>{%c1052672} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
// -----// IR Dump After FuseGlobalsPass (iree-util-fuse-globals) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d2, d1)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d2)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
#map3 = affine_map<(d0, d1) -> (d0, d1)>
#map4 = affine_map<(d0, d1) -> (d0)>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @torch_jit$async_dispatch_0 {
stream.executable.export public @torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x1024xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<257x128xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<257xf32>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 1024], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x1024xf32>> -> tensor<128x1024xf32>
%5 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [257, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<257x128xf32>> -> tensor<257x128xf32>
%6 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [257], strides = [1] : !flow.dispatch.tensor<readonly:tensor<257xf32>> -> tensor<257xf32>
%7 = tensor.empty() : tensor<257x1024xf32>
%8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<257x1024xf32>) -> tensor<257x1024xf32>
%9 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%4, %5 : tensor<128x1024xf32>, tensor<257x128xf32>) outs(%8 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.mulf %in, %in_0 : f32
%12 = arith.addf %out, %11 : f32
linalg.yield %12 : f32
} -> tensor<257x1024xf32>
%10 = linalg.generic {indexing_maps = [#map3, #map4, #map3], iterator_types = ["parallel", "parallel"]} ins(%9, %6 : tensor<257x1024xf32>, tensor<257xf32>) outs(%7 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
linalg.yield %11 : f32
} -> tensor<257x1024xf32>
flow.dispatch.tensor.store %10, %3, offsets = [0, 0], sizes = [257, 1024], strides = [1, 1] : tensor<257x1024xf32> -> !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>
return
}
}
}
util.global private @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
util.initializer {
%c1028 = arith.constant 1028 : index
%c131584 = arith.constant 131584 : index
%cst = stream.async.constant on(#hal.device.affinity<@__device_0>) : !stream.resource<constant>{%c131584} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_0 = stream.async.constant on(#hal.device.affinity<@__device_0>) : !stream.resource<constant>{%c1028} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
util.global.store %cst, @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
util.global.store %cst_0, @__constant_tensor_257xf32 : !stream.resource<constant>
util.return
}
util.global private @__constant_tensor_257xf32 : !stream.resource<constant>
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%c1052672 = arith.constant 1052672 : index
%c131584 = arith.constant 131584 : index
%c524288 = arith.constant 524288 : index
%c1 = arith.constant 1 : index
%c128 = arith.constant 128 : index
%c4 = arith.constant 4 : index
%c256 = arith.constant 256 : index
%c0 = arith.constant 0 : index
%c1028 = arith.constant 1028 : index
%__constant_tensor_257x128x1x1xf32 = util.global.load immutable @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
%__constant_tensor_257xf32 = util.global.load immutable @__constant_tensor_257xf32 : !stream.resource<constant>
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("tensor") shape([%c1, %c128, %c4, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32> in !stream.resource<external>{%c524288}
%1 = stream.timepoint.import on(#hal.device.affinity<@__device_0>) %arg1 : (!hal.fence) => !stream.timepoint
%2 = stream.timepoint.await %1 => %0 : !stream.resource<external>{%c524288}
%3 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @torch_jit$async_dispatch_0::@torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%2[%c0 to %c524288 for %c524288], %__constant_tensor_257x128x1x1xf32[%c0 to %c131584 for %c131584], %__constant_tensor_257xf32[%c0 to %c1028 for %c1028]) : (!stream.resource<external>{%c524288}, !stream.resource<constant>{%c131584}, !stream.resource<constant>{%c1028}) -> !stream.resource<external>{%c1052672}
%result, %result_timepoint = stream.timepoint.barrier on(#hal.device.affinity<@__device_0>) %3 : !stream.resource<external>{%c1052672} => !stream.timepoint
stream.timepoint.chain_external on(#hal.device.affinity<@__device_0>) %result_timepoint => (%arg2 : !hal.fence)
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %result : tensor<1x257x4x256xf32> in !stream.resource<external>{%c1052672} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
// -----// IR Dump After IPOPass (iree-util-ipo) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d2, d1)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d2)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
#map3 = affine_map<(d0, d1) -> (d0, d1)>
#map4 = affine_map<(d0, d1) -> (d0)>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @torch_jit$async_dispatch_0 {
stream.executable.export public @torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x1024xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<257x128xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<257xf32>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 1024], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x1024xf32>> -> tensor<128x1024xf32>
%5 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [257, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<257x128xf32>> -> tensor<257x128xf32>
%6 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [257], strides = [1] : !flow.dispatch.tensor<readonly:tensor<257xf32>> -> tensor<257xf32>
%7 = tensor.empty() : tensor<257x1024xf32>
%8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<257x1024xf32>) -> tensor<257x1024xf32>
%9 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%4, %5 : tensor<128x1024xf32>, tensor<257x128xf32>) outs(%8 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.mulf %in, %in_0 : f32
%12 = arith.addf %out, %11 : f32
linalg.yield %12 : f32
} -> tensor<257x1024xf32>
%10 = linalg.generic {indexing_maps = [#map3, #map4, #map3], iterator_types = ["parallel", "parallel"]} ins(%9, %6 : tensor<257x1024xf32>, tensor<257xf32>) outs(%7 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
linalg.yield %11 : f32
} -> tensor<257x1024xf32>
flow.dispatch.tensor.store %10, %3, offsets = [0, 0], sizes = [257, 1024], strides = [1, 1] : tensor<257x1024xf32> -> !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>
return
}
}
}
util.global private @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
util.initializer {
%c1028 = arith.constant 1028 : index
%c131584 = arith.constant 131584 : index
%cst = stream.async.constant on(#hal.device.affinity<@__device_0>) : !stream.resource<constant>{%c131584} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_0 = stream.async.constant on(#hal.device.affinity<@__device_0>) : !stream.resource<constant>{%c1028} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
util.global.store %cst, @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
util.global.store %cst_0, @__constant_tensor_257xf32 : !stream.resource<constant>
util.return
}
util.global private @__constant_tensor_257xf32 : !stream.resource<constant>
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%c1052672 = arith.constant 1052672 : index
%c131584 = arith.constant 131584 : index
%c524288 = arith.constant 524288 : index
%c1 = arith.constant 1 : index
%c128 = arith.constant 128 : index
%c4 = arith.constant 4 : index
%c256 = arith.constant 256 : index
%c0 = arith.constant 0 : index
%c1028 = arith.constant 1028 : index
%__constant_tensor_257x128x1x1xf32 = util.global.load immutable @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
%__constant_tensor_257xf32 = util.global.load immutable @__constant_tensor_257xf32 : !stream.resource<constant>
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("tensor") shape([%c1, %c128, %c4, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32> in !stream.resource<external>{%c524288}
%1 = stream.timepoint.import on(#hal.device.affinity<@__device_0>) %arg1 : (!hal.fence) => !stream.timepoint
%2 = stream.timepoint.await %1 => %0 : !stream.resource<external>{%c524288}
%3 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @torch_jit$async_dispatch_0::@torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%2[%c0 to %c524288 for %c524288], %__constant_tensor_257x128x1x1xf32[%c0 to %c131584 for %c131584], %__constant_tensor_257xf32[%c0 to %c1028 for %c1028]) : (!stream.resource<external>{%c524288}, !stream.resource<constant>{%c131584}, !stream.resource<constant>{%c1028}) -> !stream.resource<external>{%c1052672}
%result, %result_timepoint = stream.timepoint.barrier on(#hal.device.affinity<@__device_0>) %3 : !stream.resource<external>{%c1052672} => !stream.timepoint
stream.timepoint.chain_external on(#hal.device.affinity<@__device_0>) %result_timepoint => (%arg2 : !hal.fence)
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %result : tensor<1x257x4x256xf32> in !stream.resource<external>{%c1052672} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
// -----// IR Dump After VerifyAsyncAccessRangesPass (iree-stream-verify-async-access-ranges) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d2, d1)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d2)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
#map3 = affine_map<(d0, d1) -> (d0, d1)>
#map4 = affine_map<(d0, d1) -> (d0)>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @torch_jit$async_dispatch_0 {
stream.executable.export public @torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x1024xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<257x128xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<257xf32>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 1024], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x1024xf32>> -> tensor<128x1024xf32>
%5 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [257, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<257x128xf32>> -> tensor<257x128xf32>
%6 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [257], strides = [1] : !flow.dispatch.tensor<readonly:tensor<257xf32>> -> tensor<257xf32>
%7 = tensor.empty() : tensor<257x1024xf32>
%8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<257x1024xf32>) -> tensor<257x1024xf32>
%9 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%4, %5 : tensor<128x1024xf32>, tensor<257x128xf32>) outs(%8 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.mulf %in, %in_0 : f32
%12 = arith.addf %out, %11 : f32
linalg.yield %12 : f32
} -> tensor<257x1024xf32>
%10 = linalg.generic {indexing_maps = [#map3, #map4, #map3], iterator_types = ["parallel", "parallel"]} ins(%9, %6 : tensor<257x1024xf32>, tensor<257xf32>) outs(%7 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
linalg.yield %11 : f32
} -> tensor<257x1024xf32>
flow.dispatch.tensor.store %10, %3, offsets = [0, 0], sizes = [257, 1024], strides = [1, 1] : tensor<257x1024xf32> -> !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>
return
}
}
}
util.global private @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
util.initializer {
%c1028 = arith.constant 1028 : index
%c131584 = arith.constant 131584 : index
%cst = stream.async.constant on(#hal.device.affinity<@__device_0>) : !stream.resource<constant>{%c131584} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_0 = stream.async.constant on(#hal.device.affinity<@__device_0>) : !stream.resource<constant>{%c1028} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
util.global.store %cst, @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
util.global.store %cst_0, @__constant_tensor_257xf32 : !stream.resource<constant>
util.return
}
util.global private @__constant_tensor_257xf32 : !stream.resource<constant>
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%c1052672 = arith.constant 1052672 : index
%c131584 = arith.constant 131584 : index
%c524288 = arith.constant 524288 : index
%c1 = arith.constant 1 : index
%c128 = arith.constant 128 : index
%c4 = arith.constant 4 : index
%c256 = arith.constant 256 : index
%c0 = arith.constant 0 : index
%c1028 = arith.constant 1028 : index
%__constant_tensor_257x128x1x1xf32 = util.global.load immutable @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
%__constant_tensor_257xf32 = util.global.load immutable @__constant_tensor_257xf32 : !stream.resource<constant>
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("tensor") shape([%c1, %c128, %c4, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32> in !stream.resource<external>{%c524288}
%1 = stream.timepoint.import on(#hal.device.affinity<@__device_0>) %arg1 : (!hal.fence) => !stream.timepoint
%2 = stream.timepoint.await %1 => %0 : !stream.resource<external>{%c524288}
%3 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @torch_jit$async_dispatch_0::@torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%2[%c0 to %c524288 for %c524288], %__constant_tensor_257x128x1x1xf32[%c0 to %c131584 for %c131584], %__constant_tensor_257xf32[%c0 to %c1028 for %c1028]) : (!stream.resource<external>{%c524288}, !stream.resource<constant>{%c131584}, !stream.resource<constant>{%c1028}) -> !stream.resource<external>{%c1052672}
%result, %result_timepoint = stream.timepoint.barrier on(#hal.device.affinity<@__device_0>) %3 : !stream.resource<external>{%c1052672} => !stream.timepoint
stream.timepoint.chain_external on(#hal.device.affinity<@__device_0>) %result_timepoint => (%arg2 : !hal.fence)
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %result : tensor<1x257x4x256xf32> in !stream.resource<external>{%c1052672} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
// -----// IR Dump After ScheduleExecutionPass (iree-stream-schedule-execution) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After ScheduleConcurrencyPass (iree-stream-schedule-concurrency) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After ScheduleExecutionPass (iree-stream-schedule-execution) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%c1052672 = arith.constant 1052672 : index
%c131584 = arith.constant 131584 : index
%c524288 = arith.constant 524288 : index
%c1 = arith.constant 1 : index
%c128 = arith.constant 128 : index
%c4 = arith.constant 4 : index
%c256 = arith.constant 256 : index
%c0 = arith.constant 0 : index
%c1028 = arith.constant 1028 : index
%__constant_tensor_257x128x1x1xf32 = util.global.load immutable @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
%__constant_tensor_257xf32 = util.global.load immutable @__constant_tensor_257xf32 : !stream.resource<constant>
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("tensor") shape([%c1, %c128, %c4, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32> in !stream.resource<external>{%c524288}
%1 = stream.timepoint.import on(#hal.device.affinity<@__device_0>) %arg1 : (!hal.fence) => !stream.timepoint
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) await(%1) => with(%0 as %arg3: !stream.resource<external>{%c524288}, %__constant_tensor_257x128x1x1xf32 as %arg4: !stream.resource<constant>{%c131584}, %__constant_tensor_257xf32 as %arg5: !stream.resource<constant>{%c1028}) -> !stream.resource<external>{%c1052672} {
%3 = stream.async.dispatch @torch_jit$async_dispatch_0::@torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%arg3[%c0 to %c524288 for %c524288], %arg4[%c0 to %c131584 for %c131584], %arg5[%c0 to %c1028 for %c1028]) : (!stream.resource<external>{%c524288}, !stream.resource<constant>{%c131584}, !stream.resource<constant>{%c1028}) -> !stream.resource<external>{%c1052672}
stream.yield %3 : !stream.resource<external>{%c1052672}
} => !stream.timepoint
stream.timepoint.chain_external on(#hal.device.affinity<@__device_0>) %result_timepoint => (%arg2 : !hal.fence)
%2 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %results : tensor<1x257x4x256xf32> in !stream.resource<external>{%c1052672} -> !hal.buffer_view
util.return %2 : !hal.buffer_view
}
// -----// IR Dump After ScheduleExecutionPass (iree-stream-schedule-execution) //----- //
util.initializer {
%c1028 = arith.constant 1028 : index
%c131584 = arith.constant 131584 : index
%results:2, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with() -> (!stream.resource<constant>{%c131584}, !stream.resource<constant>{%c1028}) {
%cst = stream.async.constant : !stream.resource<constant>{%c131584} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_0 = stream.async.constant : !stream.resource<constant>{%c1028} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
stream.yield %cst, %cst_0 : !stream.resource<constant>{%c131584}, !stream.resource<constant>{%c1028}
} => !stream.timepoint
%0:2 = stream.timepoint.await %result_timepoint => %results#0, %results#1 : !stream.resource<constant>{%c131584}, !stream.resource<constant>{%c1028}
util.global.store %0#0, @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
util.global.store %0#1, @__constant_tensor_257xf32 : !stream.resource<constant>
util.return
}
// -----// IR Dump After ScheduleConcurrencyPass (iree-stream-schedule-concurrency) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%c1052672 = arith.constant 1052672 : index
%c131584 = arith.constant 131584 : index
%c524288 = arith.constant 524288 : index
%c1 = arith.constant 1 : index
%c128 = arith.constant 128 : index
%c4 = arith.constant 4 : index
%c256 = arith.constant 256 : index
%c0 = arith.constant 0 : index
%c1028 = arith.constant 1028 : index
%__constant_tensor_257x128x1x1xf32 = util.global.load immutable @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
%__constant_tensor_257xf32 = util.global.load immutable @__constant_tensor_257xf32 : !stream.resource<constant>
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("tensor") shape([%c1, %c128, %c4, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32> in !stream.resource<external>{%c524288}
%1 = stream.timepoint.import on(#hal.device.affinity<@__device_0>) %arg1 : (!hal.fence) => !stream.timepoint
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) await(%1) => with(%0 as %arg3: !stream.resource<external>{%c524288}, %__constant_tensor_257x128x1x1xf32 as %arg4: !stream.resource<constant>{%c131584}, %__constant_tensor_257xf32 as %arg5: !stream.resource<constant>{%c1028}) -> !stream.resource<external>{%c1052672} {
%3 = stream.async.dispatch @torch_jit$async_dispatch_0::@torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%arg3[%c0 to %c524288 for %c524288], %arg4[%c0 to %c131584 for %c131584], %arg5[%c0 to %c1028 for %c1028]) : (!stream.resource<external>{%c524288}, !stream.resource<constant>{%c131584}, !stream.resource<constant>{%c1028}) -> !stream.resource<external>{%c1052672}
stream.yield %3 : !stream.resource<external>{%c1052672}
} => !stream.timepoint
stream.timepoint.chain_external on(#hal.device.affinity<@__device_0>) %result_timepoint => (%arg2 : !hal.fence)
%2 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %results : tensor<1x257x4x256xf32> in !stream.resource<external>{%c1052672} -> !hal.buffer_view
util.return %2 : !hal.buffer_view
}
// -----// IR Dump After ScheduleConcurrencyPass (iree-stream-schedule-concurrency) //----- //
util.initializer {
%c1028 = arith.constant 1028 : index
%c131584 = arith.constant 131584 : index
%results:2, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with() -> (!stream.resource<constant>{%c131584}, !stream.resource<constant>{%c1028}) {
%cst = stream.async.constant : !stream.resource<constant>{%c131584} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_0 = stream.async.constant : !stream.resource<constant>{%c1028} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
stream.yield %cst, %cst_0 : !stream.resource<constant>{%c131584}, !stream.resource<constant>{%c1028}
} => !stream.timepoint
%0:2 = stream.timepoint.await %result_timepoint => %results#0, %results#1 : !stream.resource<constant>{%c131584}, !stream.resource<constant>{%c1028}
util.global.store %0#0, @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
util.global.store %0#1, @__constant_tensor_257xf32 : !stream.resource<constant>
util.return
}
// -----// IR Dump After PropagateTimepointsPass (iree-stream-propagate-timepoints) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d2, d1)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d2)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
#map3 = affine_map<(d0, d1) -> (d0, d1)>
#map4 = affine_map<(d0, d1) -> (d0)>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @torch_jit$async_dispatch_0 {
stream.executable.export public @torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x1024xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<257x128xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<257xf32>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 1024], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x1024xf32>> -> tensor<128x1024xf32>
%5 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [257, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<257x128xf32>> -> tensor<257x128xf32>
%6 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [257], strides = [1] : !flow.dispatch.tensor<readonly:tensor<257xf32>> -> tensor<257xf32>
%7 = tensor.empty() : tensor<257x1024xf32>
%8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<257x1024xf32>) -> tensor<257x1024xf32>
%9 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%4, %5 : tensor<128x1024xf32>, tensor<257x128xf32>) outs(%8 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.mulf %in, %in_0 : f32
%12 = arith.addf %out, %11 : f32
linalg.yield %12 : f32
} -> tensor<257x1024xf32>
%10 = linalg.generic {indexing_maps = [#map3, #map4, #map3], iterator_types = ["parallel", "parallel"]} ins(%9, %6 : tensor<257x1024xf32>, tensor<257xf32>) outs(%7 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
linalg.yield %11 : f32
} -> tensor<257x1024xf32>
flow.dispatch.tensor.store %10, %3, offsets = [0, 0], sizes = [257, 1024], strides = [1, 1] : tensor<257x1024xf32> -> !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>
return
}
}
}
util.global private mutable @__constant_tensor_257x128x1x1xf32__timepoint = #stream.timepoint<immediate> : !stream.timepoint
util.global private @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
util.initializer {
%c1028 = arith.constant 1028 : index
%c131584 = arith.constant 131584 : index
%results:2, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with() -> (!stream.resource<constant>{%c131584}, !stream.resource<constant>{%c1028}) {
%cst = stream.async.constant : !stream.resource<constant>{%c131584} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_0 = stream.async.constant : !stream.resource<constant>{%c1028} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
stream.yield %cst, %cst_0 : !stream.resource<constant>{%c131584}, !stream.resource<constant>{%c1028}
} => !stream.timepoint
%0:2 = stream.timepoint.await %result_timepoint => %results#0, %results#1 : !stream.resource<constant>{%c131584}, !stream.resource<constant>{%c1028}
util.global.store %result_timepoint, @__constant_tensor_257x128x1x1xf32__timepoint : !stream.timepoint
util.global.store %results#0, @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
util.global.store %result_timepoint, @__constant_tensor_257xf32__timepoint : !stream.timepoint
util.global.store %results#1, @__constant_tensor_257xf32 : !stream.resource<constant>
util.return
}
util.global private mutable @__constant_tensor_257xf32__timepoint = #stream.timepoint<immediate> : !stream.timepoint
util.global private @__constant_tensor_257xf32 : !stream.resource<constant>
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%c1052672 = arith.constant 1052672 : index
%c131584 = arith.constant 131584 : index
%c524288 = arith.constant 524288 : index
%c1 = arith.constant 1 : index
%c128 = arith.constant 128 : index
%c4 = arith.constant 4 : index
%c256 = arith.constant 256 : index
%c0 = arith.constant 0 : index
%c1028 = arith.constant 1028 : index
%__constant_tensor_257x128x1x1xf32__timepoint = util.global.load @__constant_tensor_257x128x1x1xf32__timepoint : !stream.timepoint
%__constant_tensor_257x128x1x1xf32 = util.global.load immutable @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
%0 = stream.timepoint.await %__constant_tensor_257x128x1x1xf32__timepoint => %__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>{%c131584}
%__constant_tensor_257xf32__timepoint = util.global.load @__constant_tensor_257xf32__timepoint : !stream.timepoint
%__constant_tensor_257xf32 = util.global.load immutable @__constant_tensor_257xf32 : !stream.resource<constant>
%1 = stream.timepoint.await %__constant_tensor_257xf32__timepoint => %__constant_tensor_257xf32 : !stream.resource<constant>{%c1028}
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("tensor") shape([%c1, %c128, %c4, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32> in !stream.resource<external>{%c524288}
%3 = stream.timepoint.import on(#hal.device.affinity<@__device_0>) %arg1 : (!hal.fence) => !stream.timepoint
%4 = stream.timepoint.immediate => !stream.timepoint
%5 = stream.timepoint.join max(%3, %4, %__constant_tensor_257x128x1x1xf32__timepoint, %__constant_tensor_257xf32__timepoint) => !stream.timepoint
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) await(%5) => with(%2 as %arg3: !stream.resource<external>{%c524288}, %__constant_tensor_257x128x1x1xf32 as %arg4: !stream.resource<constant>{%c131584}, %__constant_tensor_257xf32 as %arg5: !stream.resource<constant>{%c1028}) -> !stream.resource<external>{%c1052672} {
%7 = stream.async.dispatch @torch_jit$async_dispatch_0::@torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%arg3[%c0 to %c524288 for %c524288], %arg4[%c0 to %c131584 for %c131584], %arg5[%c0 to %c1028 for %c1028]) : (!stream.resource<external>{%c524288}, !stream.resource<constant>{%c131584}, !stream.resource<constant>{%c1028}) -> !stream.resource<external>{%c1052672}
stream.yield %7 : !stream.resource<external>{%c1052672}
} => !stream.timepoint
stream.timepoint.chain_external on(#hal.device.affinity<@__device_0>) %result_timepoint => (%arg2 : !hal.fence)
%6 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %results : tensor<1x257x4x256xf32> in !stream.resource<external>{%c1052672} -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
// -----// IR Dump After MaterializeBuiltinsPass (iree-stream-materialize-builtins) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d2, d1)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d2)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
#map3 = affine_map<(d0, d1) -> (d0, d1)>
#map4 = affine_map<(d0, d1) -> (d0)>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @torch_jit$async_dispatch_0 {
stream.executable.export public @torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x1024xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<257x128xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<257xf32>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 1024], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x1024xf32>> -> tensor<128x1024xf32>
%5 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [257, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<257x128xf32>> -> tensor<257x128xf32>
%6 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [257], strides = [1] : !flow.dispatch.tensor<readonly:tensor<257xf32>> -> tensor<257xf32>
%7 = tensor.empty() : tensor<257x1024xf32>
%8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<257x1024xf32>) -> tensor<257x1024xf32>
%9 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%4, %5 : tensor<128x1024xf32>, tensor<257x128xf32>) outs(%8 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.mulf %in, %in_0 : f32
%12 = arith.addf %out, %11 : f32
linalg.yield %12 : f32
} -> tensor<257x1024xf32>
%10 = linalg.generic {indexing_maps = [#map3, #map4, #map3], iterator_types = ["parallel", "parallel"]} ins(%9, %6 : tensor<257x1024xf32>, tensor<257xf32>) outs(%7 : tensor<257x1024xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
linalg.yield %11 : f32
} -> tensor<257x1024xf32>
flow.dispatch.tensor.store %10, %3, offsets = [0, 0], sizes = [257, 1024], strides = [1, 1] : tensor<257x1024xf32> -> !flow.dispatch.tensor<writeonly:tensor<257x1024xf32>>
return
}
}
}
util.global private mutable @__constant_tensor_257x128x1x1xf32__timepoint = #stream.timepoint<immediate> : !stream.timepoint
util.global private @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
util.initializer {
%c1028 = arith.constant 1028 : index
%c131584 = arith.constant 131584 : index
%results:2, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with() -> (!stream.resource<constant>{%c131584}, !stream.resource<constant>{%c1028}) {
%cst = stream.async.constant : !stream.resource<constant>{%c131584} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_0 = stream.async.constant : !stream.resource<constant>{%c1028} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
stream.yield %cst, %cst_0 : !stream.resource<constant>{%c131584}, !stream.resource<constant>{%c1028}
} => !stream.timepoint
%0:2 = stream.timepoint.await %result_timepoint => %results#0, %results#1 : !stream.resource<constant>{%c131584}, !stream.resource<constant>{%c1028}
util.global.store %result_timepoint, @__constant_tensor_257x128x1x1xf32__timepoint : !stream.timepoint
util.global.store %results#0, @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
util.global.store %result_timepoint, @__constant_tensor_257xf32__timepoint : !stream.timepoint
util.global.store %results#1, @__constant_tensor_257xf32 : !stream.resource<constant>
util.return
}
util.global private mutable @__constant_tensor_257xf32__timepoint = #stream.timepoint<immediate> : !stream.timepoint
util.global private @__constant_tensor_257xf32 : !stream.resource<constant>
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%c1052672 = arith.constant 1052672 : index
%c131584 = arith.constant 131584 : index
%c524288 = arith.constant 524288 : index
%c1 = arith.constant 1 : index
%c128 = arith.constant 128 : index
%c4 = arith.constant 4 : index
%c256 = arith.constant 256 : index
%c0 = arith.constant 0 : index
%c1028 = arith.constant 1028 : index
%__constant_tensor_257x128x1x1xf32__timepoint = util.global.load @__constant_tensor_257x128x1x1xf32__timepoint : !stream.timepoint
%__constant_tensor_257x128x1x1xf32 = util.global.load immutable @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
%0 = stream.timepoint.await %__constant_tensor_257x128x1x1xf32__timepoint => %__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>{%c131584}
%__constant_tensor_257xf32__timepoint = util.global.load @__constant_tensor_257xf32__timepoint : !stream.timepoint
%__constant_tensor_257xf32 = util.global.load immutable @__constant_tensor_257xf32 : !stream.resource<constant>
%1 = stream.timepoint.await %__constant_tensor_257xf32__timepoint => %__constant_tensor_257xf32 : !stream.resource<constant>{%c1028}
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("tensor") shape([%c1, %c128, %c4, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32> in !stream.resource<external>{%c524288}
%3 = stream.timepoint.import on(#hal.device.affinity<@__device_0>) %arg1 : (!hal.fence) => !stream.timepoint
%4 = stream.timepoint.immediate => !stream.timepoint
%5 = stream.timepoint.join max(%3, %4, %__constant_tensor_257x128x1x1xf32__timepoint, %__constant_tensor_257xf32__timepoint) => !stream.timepoint
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) await(%5) => with(%2 as %arg3: !stream.resource<external>{%c524288}, %__constant_tensor_257x128x1x1xf32 as %arg4: !stream.resource<constant>{%c131584}, %__constant_tensor_257xf32 as %arg5: !stream.resource<constant>{%c1028}) -> !stream.resource<external>{%c1052672} {
%7 = stream.async.dispatch @torch_jit$async_dispatch_0::@torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%arg3[%c0 to %c524288 for %c524288], %arg4[%c0 to %c131584 for %c131584], %arg5[%c0 to %c1028 for %c1028]) : (!stream.resource<external>{%c524288}, !stream.resource<constant>{%c131584}, !stream.resource<constant>{%c1028}) -> !stream.resource<external>{%c1052672}
stream.yield %7 : !stream.resource<external>{%c1052672}
} => !stream.timepoint
stream.timepoint.chain_external on(#hal.device.affinity<@__device_0>) %result_timepoint => (%arg2 : !hal.fence)
%6 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %results : tensor<1x257x4x256xf32> in !stream.resource<external>{%c1052672} -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
}
{-#
dialect_resources: {
builtin: {
__onnx_constant_not_found_possibly_due_to_being_elided__: "0x00000000",
__onnx_constant_not_found_possibly_due_to_being_elided___1: "0x00000000"
}
}
#-}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.initializer {
%c1028 = arith.constant 1028 : index
%c131584 = arith.constant 131584 : index
%results:2, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with() -> (!stream.resource<constant>{%c131584}, !stream.resource<constant>{%c1028}) {
%cst = stream.async.constant : !stream.resource<constant>{%c131584} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_0 = stream.async.constant : !stream.resource<constant>{%c1028} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
stream.yield %cst, %cst_0 : !stream.resource<constant>{%c131584}, !stream.resource<constant>{%c1028}
} => !stream.timepoint
util.global.store %result_timepoint, @__constant_tensor_257x128x1x1xf32__timepoint : !stream.timepoint
util.global.store %results#0, @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
util.global.store %result_timepoint, @__constant_tensor_257xf32__timepoint : !stream.timepoint
util.global.store %results#1, @__constant_tensor_257xf32 : !stream.resource<constant>
util.return
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%c1052672 = arith.constant 1052672 : index
%c131584 = arith.constant 131584 : index
%c524288 = arith.constant 524288 : index
%c1 = arith.constant 1 : index
%c128 = arith.constant 128 : index
%c4 = arith.constant 4 : index
%c256 = arith.constant 256 : index
%c0 = arith.constant 0 : index
%c1028 = arith.constant 1028 : index
%__constant_tensor_257x128x1x1xf32__timepoint = util.global.load @__constant_tensor_257x128x1x1xf32__timepoint : !stream.timepoint
%__constant_tensor_257x128x1x1xf32 = util.global.load immutable @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
%__constant_tensor_257xf32__timepoint = util.global.load @__constant_tensor_257xf32__timepoint : !stream.timepoint
%__constant_tensor_257xf32 = util.global.load immutable @__constant_tensor_257xf32 : !stream.resource<constant>
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("tensor") shape([%c1, %c128, %c4, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32> in !stream.resource<external>{%c524288}
%1 = stream.timepoint.import on(#hal.device.affinity<@__device_0>) %arg1 : (!hal.fence) => !stream.timepoint
%2 = stream.timepoint.join max(%1, %__constant_tensor_257x128x1x1xf32__timepoint, %__constant_tensor_257xf32__timepoint) => !stream.timepoint
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) await(%2) => with(%0 as %arg3: !stream.resource<external>{%c524288}, %__constant_tensor_257x128x1x1xf32 as %arg4: !stream.resource<constant>{%c131584}, %__constant_tensor_257xf32 as %arg5: !stream.resource<constant>{%c1028}) -> !stream.resource<external>{%c1052672} {
%4 = stream.async.dispatch @torch_jit$async_dispatch_0::@torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%arg3[%c0 to %c524288 for %c524288], %arg4[%c0 to %c131584 for %c131584], %arg5[%c0 to %c1028 for %c1028]) : (!stream.resource<external>{%c524288}, !stream.resource<constant>{%c131584}, !stream.resource<constant>{%c1028}) -> !stream.resource<external>{%c1052672}
stream.yield %4 : !stream.resource<external>{%c1052672}
} => !stream.timepoint
stream.timepoint.chain_external on(#hal.device.affinity<@__device_0>) %result_timepoint => (%arg2 : !hal.fence)
%3 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %results : tensor<1x257x4x256xf32> in !stream.resource<external>{%c1052672} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @torch_jit(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
%0 = util.null : !hal.fence
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
%device_0 = hal.devices.get %c0 : !hal.device
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @torch_jit$async(%arg0, %0, %fence) : (!hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.initializer {
%c1028 = arith.constant 1028 : index
%c131584 = arith.constant 131584 : index
%results:2, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with() -> (!stream.resource<constant>{%c131584}, !stream.resource<constant>{%c1028}) {
%cst = stream.async.constant : !stream.resource<constant>{%c131584} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided__> : tensor<257x128x1x1xf32>
%cst_0 = stream.async.constant : !stream.resource<constant>{%c1028} = dense_resource<__onnx_constant_not_found_possibly_due_to_being_elided___1> : tensor<257xf32>
stream.yield %cst, %cst_0 : !stream.resource<constant>{%c131584}, !stream.resource<constant>{%c1028}
} => !stream.timepoint
util.global.store %result_timepoint, @__constant_tensor_257x128x1x1xf32__timepoint : !stream.timepoint
util.global.store %results#0, @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
util.global.store %result_timepoint, @__constant_tensor_257xf32__timepoint : !stream.timepoint
util.global.store %results#1, @__constant_tensor_257xf32 : !stream.resource<constant>
util.return
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @torch_jit$async(%arg0: !hal.buffer_view, %arg1: !hal.fence, %arg2: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%c1052672 = arith.constant 1052672 : index
%c131584 = arith.constant 131584 : index
%c524288 = arith.constant 524288 : index
%c1 = arith.constant 1 : index
%c128 = arith.constant 128 : index
%c4 = arith.constant 4 : index
%c256 = arith.constant 256 : index
%c0 = arith.constant 0 : index
%c1028 = arith.constant 1028 : index
%__constant_tensor_257x128x1x1xf32__timepoint = util.global.load @__constant_tensor_257x128x1x1xf32__timepoint : !stream.timepoint
%__constant_tensor_257x128x1x1xf32 = util.global.load immutable @__constant_tensor_257x128x1x1xf32 : !stream.resource<constant>
%__constant_tensor_257xf32__timepoint = util.global.load @__constant_tensor_257xf32__timepoint : !stream.timepoint
%__constant_tensor_257xf32 = util.global.load immutable @__constant_tensor_257xf32 : !stream.resource<constant>
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("tensor") shape([%c1, %c128, %c4, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x4x256xf32> in !stream.resource<external>{%c524288}
%1 = stream.timepoint.import on(#hal.device.affinity<@__device_0>) %arg1 : (!hal.fence) => !stream.timepoint
%2 = stream.timepoint.join max(%1, %__constant_tensor_257x128x1x1xf32__timepoint, %__constant_tensor_257xf32__timepoint) => !stream.timepoint
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) await(%2) => with(%0 as %arg3: !stream.resource<external>{%c524288}, %__constant_tensor_257x128x1x1xf32 as %arg4: !stream.resource<constant>{%c131584}, %__constant_tensor_257xf32 as %arg5: !stream.resource<constant>{%c1028}) -> !stream.resource<external>{%c1052672} {
%4 = stream.async.dispatch @torch_jit$async_dispatch_0::@torch_jit$async_dispatch_0_matmul_like_257x1024x128_f32(%arg3[%c0 to %c524288 for %c524288], %arg4[%c0 to %c131584 for %c131584], %arg5[%c0 to %c1028 for %c1028]) : (!stream.resource<external>{%c524288}, !stream.resource<constant>{%c131584}, !stream.resource<constant>{%c1028}) -> !stream.resource<external>{%c1052672}
stream.yield %4 : !stream.resource<external>{%c1052672}
} => !stream.timepoint
stream.timepoint.chain_external on(#hal.device.affinity<@__device_0>) %result_timepoint => (%arg2 : !hal.fence)
%3 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %results : tensor<1x257x4x256xf32> in !stream.resource<external>{%c1052672} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}