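// IREE pass-by-pass IR dump (--mlir-print-ir-after-all style) for a dynamic-shape
// matmul followed by an elementwise doubling (%out = %in + %in), compiled for the
// llvm-cpu (embedded-elf-x86_64) target. The log is truncated partway through the
// global-optimization dumps.
//
// A command along these lines produces this kind of dump (assumed invocation, not
// part of the original log; exact flag names can vary across IREE versions):
//   iree-compile foo.mlir --iree-hal-target-backends=llvm-cpu \
//       --mlir-print-ir-after-all -o foo.vmfb 2> dump.mlir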
// -----// IR Dump After AutoInputConversionPipelinePass (iree-auto-input-conversion) //----- // | |
#map = affine_map<(d0, d1) -> (d0, d1)> | |
module { | |
func.func @foo(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>) -> tensor<?x?xf32> { | |
%c0 = arith.constant 0 : index | |
%c1 = arith.constant 1 : index | |
%dim = tensor.dim %arg0, %c0 : tensor<?x?xf32> | |
%dim_0 = tensor.dim %arg1, %c1 : tensor<?x?xf32> | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = tensor.empty(%dim, %dim_0) : tensor<?x?xf32> | |
%1 = linalg.fill ins(%cst : f32) outs(%0 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%2 = linalg.matmul ins(%arg0, %arg1 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%1 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%3 = tensor.empty(%dim, %dim_0) : tensor<?x?xf32> | |
%4 = linalg.generic {indexing_maps = [#map, #map], iterator_types = ["parallel", "parallel"]} ins(%2 : tensor<?x?xf32>) outs(%3 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%5 = arith.addf %in, %in : f32 | |
linalg.yield %5 : f32 | |
} -> tensor<?x?xf32> | |
return %4 : tensor<?x?xf32> | |
} | |
} | |
// -----// IR Dump After IREEImportPublicPass (iree-import-public) //----- // | |
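// iree-import-public rewrites the public `func.func`/`return` ops into IREE's
// `util.func`/`util.return` equivalents; the function body is otherwise unchanged.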
#map = affine_map<(d0, d1) -> (d0, d1)> | |
module { | |
util.func public @foo(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>) -> tensor<?x?xf32> { | |
%c0 = arith.constant 0 : index | |
%c1 = arith.constant 1 : index | |
%dim = tensor.dim %arg0, %c0 : tensor<?x?xf32> | |
%dim_0 = tensor.dim %arg1, %c1 : tensor<?x?xf32> | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = tensor.empty(%dim, %dim_0) : tensor<?x?xf32> | |
%1 = linalg.fill ins(%cst : f32) outs(%0 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%2 = linalg.matmul ins(%arg0, %arg1 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%1 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%3 = tensor.empty(%dim, %dim_0) : tensor<?x?xf32> | |
%4 = linalg.generic {indexing_maps = [#map, #map], iterator_types = ["parallel", "parallel"]} ins(%2 : tensor<?x?xf32>) outs(%3 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%5 = arith.addf %in, %in : f32 | |
linalg.yield %5 : f32 | |
} -> tensor<?x?xf32> | |
util.return %4 : tensor<?x?xf32> | |
} | |
} | |
// -----// IR Dump After ImportMLProgramPass (iree-import-ml-program) //----- // | |
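// There are no `ml_program`, mesh, f64, or streamable ops in this module, so the
// next several dumps are identical apart from `%cst` being hoisted to the top of
// the function starting with the ConvertMeshToFlow dump.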
#map = affine_map<(d0, d1) -> (d0, d1)> | |
module { | |
util.func public @foo(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>) -> tensor<?x?xf32> { | |
%c0 = arith.constant 0 : index | |
%c1 = arith.constant 1 : index | |
%dim = tensor.dim %arg0, %c0 : tensor<?x?xf32> | |
%dim_0 = tensor.dim %arg1, %c1 : tensor<?x?xf32> | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = tensor.empty(%dim, %dim_0) : tensor<?x?xf32> | |
%1 = linalg.fill ins(%cst : f32) outs(%0 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%2 = linalg.matmul ins(%arg0, %arg1 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%1 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%3 = tensor.empty(%dim, %dim_0) : tensor<?x?xf32> | |
%4 = linalg.generic {indexing_maps = [#map, #map], iterator_types = ["parallel", "parallel"]} ins(%2 : tensor<?x?xf32>) outs(%3 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%5 = arith.addf %in, %in : f32 | |
linalg.yield %5 : f32 | |
} -> tensor<?x?xf32> | |
util.return %4 : tensor<?x?xf32> | |
} | |
} | |
// -----// IR Dump After SanitizeModuleNamesPass (iree-sanitize-module-names) //----- // | |
#map = affine_map<(d0, d1) -> (d0, d1)> | |
module { | |
util.func public @foo(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>) -> tensor<?x?xf32> { | |
%c0 = arith.constant 0 : index | |
%c1 = arith.constant 1 : index | |
%dim = tensor.dim %arg0, %c0 : tensor<?x?xf32> | |
%dim_0 = tensor.dim %arg1, %c1 : tensor<?x?xf32> | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = tensor.empty(%dim, %dim_0) : tensor<?x?xf32> | |
%1 = linalg.fill ins(%cst : f32) outs(%0 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%2 = linalg.matmul ins(%arg0, %arg1 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%1 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%3 = tensor.empty(%dim, %dim_0) : tensor<?x?xf32> | |
%4 = linalg.generic {indexing_maps = [#map, #map], iterator_types = ["parallel", "parallel"]} ins(%2 : tensor<?x?xf32>) outs(%3 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%5 = arith.addf %in, %in : f32 | |
linalg.yield %5 : f32 | |
} -> tensor<?x?xf32> | |
util.return %4 : tensor<?x?xf32> | |
} | |
} | |
// -----// IR Dump After ConvertMeshToFlowPass (iree-convert-mesh-to-flow) //----- // | |
#map = affine_map<(d0, d1) -> (d0, d1)> | |
module { | |
util.func public @foo(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>) -> tensor<?x?xf32> { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%c1 = arith.constant 1 : index | |
%dim = tensor.dim %arg0, %c0 : tensor<?x?xf32> | |
%dim_0 = tensor.dim %arg1, %c1 : tensor<?x?xf32> | |
%0 = tensor.empty(%dim, %dim_0) : tensor<?x?xf32> | |
%1 = linalg.fill ins(%cst : f32) outs(%0 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%2 = linalg.matmul ins(%arg0, %arg1 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%1 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%3 = tensor.empty(%dim, %dim_0) : tensor<?x?xf32> | |
%4 = linalg.generic {indexing_maps = [#map, #map], iterator_types = ["parallel", "parallel"]} ins(%2 : tensor<?x?xf32>) outs(%3 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%5 = arith.addf %in, %in : f32 | |
linalg.yield %5 : f32 | |
} -> tensor<?x?xf32> | |
util.return %4 : tensor<?x?xf32> | |
} | |
} | |
// -----// IR Dump After DemoteF64ToF32Pass (iree-input-conversion-demote-f64-to-f32) //----- // | |
#map = affine_map<(d0, d1) -> (d0, d1)> | |
module { | |
util.func public @foo(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>) -> tensor<?x?xf32> { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%c1 = arith.constant 1 : index | |
%dim = tensor.dim %arg0, %c0 : tensor<?x?xf32> | |
%dim_0 = tensor.dim %arg1, %c1 : tensor<?x?xf32> | |
%0 = tensor.empty(%dim, %dim_0) : tensor<?x?xf32> | |
%1 = linalg.fill ins(%cst : f32) outs(%0 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%2 = linalg.matmul ins(%arg0, %arg1 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%1 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%3 = tensor.empty(%dim, %dim_0) : tensor<?x?xf32> | |
%4 = linalg.generic {indexing_maps = [#map, #map], iterator_types = ["parallel", "parallel"]} ins(%2 : tensor<?x?xf32>) outs(%3 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%5 = arith.addf %in, %in : f32 | |
linalg.yield %5 : f32 | |
} -> tensor<?x?xf32> | |
util.return %4 : tensor<?x?xf32> | |
} | |
} | |
// -----// IR Dump After mlir::iree_compiler::IREE::ABI::ConvertStreamableOpsPass (iree-abi-convert-streamable-ops) //----- // | |
#map = affine_map<(d0, d1) -> (d0, d1)> | |
module { | |
util.func public @foo(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>) -> tensor<?x?xf32> { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%c1 = arith.constant 1 : index | |
%dim = tensor.dim %arg0, %c0 : tensor<?x?xf32> | |
%dim_0 = tensor.dim %arg1, %c1 : tensor<?x?xf32> | |
%0 = tensor.empty(%dim, %dim_0) : tensor<?x?xf32> | |
%1 = linalg.fill ins(%cst : f32) outs(%0 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%2 = linalg.matmul ins(%arg0, %arg1 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%1 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%3 = tensor.empty(%dim, %dim_0) : tensor<?x?xf32> | |
%4 = linalg.generic {indexing_maps = [#map, #map], iterator_types = ["parallel", "parallel"]} ins(%2 : tensor<?x?xf32>) outs(%3 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%5 = arith.addf %in, %in : f32 | |
linalg.yield %5 : f32 | |
} -> tensor<?x?xf32> | |
util.return %4 : tensor<?x?xf32> | |
} | |
} | |
// -----// IR Dump After mlir::iree_compiler::IREE::ABI::WrapEntryPointsPass (iree-abi-wrap-entry-points) //----- // | |
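// The ABI wrapper pass renames the original function to private `@_foo` and emits a
// public `@foo` operating on `!hal.buffer_view` arguments: it queries the dynamic
// dims via `hal.buffer_view.dim`, imports/exports the tensors, calls `@_foo`, and
// carries reflection metadata describing the original tensor signature.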
#map = affine_map<(d0, d1) -> (d0, d1)> | |
module { | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = util.call @_foo(%2, %5) : (tensor<?x?xf32>, tensor<?x?xf32>) -> tensor<?x?xf32> | |
%c0 = arith.constant 0 : index | |
%dim = tensor.dim %6, %c0 : tensor<?x?xf32> | |
%c1 = arith.constant 1 : index | |
%dim_0 = tensor.dim %6, %c1 : tensor<?x?xf32> | |
%7 = hal.tensor.export %6 "output0" : tensor<?x?xf32>{%dim, %dim_0} -> !hal.buffer_view | |
util.return %7 : !hal.buffer_view | |
} | |
util.func private @_foo(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>) -> tensor<?x?xf32> { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%c1 = arith.constant 1 : index | |
%dim = tensor.dim %arg0, %c0 : tensor<?x?xf32> | |
%dim_0 = tensor.dim %arg1, %c1 : tensor<?x?xf32> | |
%0 = tensor.empty(%dim, %dim_0) : tensor<?x?xf32> | |
%1 = linalg.fill ins(%cst : f32) outs(%0 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%2 = linalg.matmul ins(%arg0, %arg1 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%1 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%3 = tensor.empty(%dim, %dim_0) : tensor<?x?xf32> | |
%4 = linalg.generic {indexing_maps = [#map, #map], iterator_types = ["parallel", "parallel"]} ins(%2 : tensor<?x?xf32>) outs(%3 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%5 = arith.addf %in, %in : f32 | |
linalg.yield %5 : f32 | |
} -> tensor<?x?xf32> | |
util.return %4 : tensor<?x?xf32> | |
} | |
} | |
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
util.func private @_foo(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>) -> tensor<?x?xf32> { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%c1 = arith.constant 1 : index | |
%dim = tensor.dim %arg0, %c0 : tensor<?x?xf32> | |
%dim_0 = tensor.dim %arg1, %c1 : tensor<?x?xf32> | |
%0 = tensor.empty(%dim, %dim_0) : tensor<?x?xf32> | |
%1 = linalg.fill ins(%cst : f32) outs(%0 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%2 = linalg.matmul ins(%arg0, %arg1 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%1 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%3 = tensor.empty(%dim, %dim_0) : tensor<?x?xf32> | |
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%2 : tensor<?x?xf32>) outs(%3 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%5 = arith.addf %in, %in : f32 | |
linalg.yield %5 : f32 | |
} -> tensor<?x?xf32> | |
util.return %4 : tensor<?x?xf32> | |
} | |
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%c1 = arith.constant 1 : index | |
%c0 = arith.constant 0 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = util.call @_foo(%2, %5) : (tensor<?x?xf32>, tensor<?x?xf32>) -> tensor<?x?xf32> | |
%dim = tensor.dim %6, %c0 : tensor<?x?xf32> | |
%dim_0 = tensor.dim %6, %c1 : tensor<?x?xf32> | |
%7 = hal.tensor.export %6 "output0" : tensor<?x?xf32>{%dim, %dim_0} -> !hal.buffer_view | |
util.return %7 : !hal.buffer_view | |
} | |
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
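// This canonicalizer dump appears to be printed from within the inliner's
// simplification pipeline, which is why `@_foo`'s body already shows up inlined
// into `@foo` here; the module-wide result follows in the Inliner dump below.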
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%9 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%12 = arith.addf %in, %in : f32 | |
linalg.yield %12 : f32 | |
} -> tensor<?x?xf32> | |
%11 = hal.tensor.export %10 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %11 : !hal.buffer_view | |
} | |
// -----// IR Dump After Inliner (inline) //----- // | |
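// The inliner folds the private `@_foo` into the public wrapper and drops the
// now-dead callee, leaving a single entry point.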
#map = affine_map<(d0, d1) -> (d0, d1)> | |
module { | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%10 = linalg.generic {indexing_maps = [#map, #map], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%9 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%12 = arith.addf %in, %in : f32 | |
linalg.yield %12 : f32 | |
} -> tensor<?x?xf32> | |
%11 = hal.tensor.export %10 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %11 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%9 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%12 = arith.addf %in, %in : f32 | |
linalg.yield %12 : f32 | |
} -> tensor<?x?xf32> | |
%11 = hal.tensor.export %10 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %11 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
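// CSE reuses the single `tensor.empty` (%6) as both the fill's init and the
// elementwise generic's init, eliminating the duplicate empty tensor.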
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After SymbolDCE (symbol-dce) //----- // | |
#map = affine_map<(d0, d1) -> (d0, d1)> | |
module { | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [#map, #map], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After AssignLegacyTargetDevicesPass (iree-hal-assign-legacy-target-devices) //----- // | |
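// The legacy target-assignment pass records the selected llvm-cpu
// (embedded-elf-x86_64) executable target as a `#hal.device.target<"local", ...>`
// list in the module's `hal.device.targets` attribute.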
#executable_target_embedded_elf_x86_64 = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>, max_stack_allocation_size = 32768 : i64, native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1) -> (d0, d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64]> : !hal.device | |
module attributes {hal.device.targets = [#device_target_local]} { | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [#map, #map], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After MaterializeTargetDevicesPass (iree-hal-materialize-target-devices) //----- // | |
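// Device materialization replaces the module-level target list with a
// `util.global private @__device_0` holding the device target and sets it as the
// default stream affinity.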
#executable_target_embedded_elf_x86_64 = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>, max_stack_allocation_size = 32768 : i64, native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1) -> (d0, d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [#map, #map], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After ResolveDevicePromisesPass (iree-hal-resolve-device-promises) //----- // | |
#executable_target_embedded_elf_x86_64 = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>, max_stack_allocation_size = 32768 : i64, native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1) -> (d0, d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [#map, #map], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After ResolveDeviceAliasesPass (iree-hal-resolve-device-aliases) //----- // | |
#executable_target_embedded_elf_x86_64 = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>, max_stack_allocation_size = 32768 : i64, native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1) -> (d0, d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [#map, #map], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After VerifyDevicesPass (iree-hal-verify-devices) //----- // | |
#executable_target_embedded_elf_x86_64 = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>, max_stack_allocation_size = 32768 : i64, native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1) -> (d0, d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [#map, #map], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After AttrBasedPipelinePass (iree-preprocessing-attr-based-pipeline) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After WarnOnUninitializedValuesPass (iree-global-opt-warn-on-uninitialized-values) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After OptimizeIntArithmeticPass (iree-util-optimize-int-arithmetic) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After LinalgQuantizedConvToConvPass (iree-global-opt-quantized-conv-to-conv) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After LinalgQuantizedMatmulToMatmulPass (iree-global-opt-quantized-matmul-to-matmul) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After CanonicalizePass (iree-flow-canonicalize) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After RemoveZeroExtentTensorsPass (iree-global-opt-remove-zero-extent-tensors) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After DetachElementwiseFromNamedOpsPass (iree-global-opt-detach-elementwise-from-named-ops) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After LinalgNamedOpConversionPass (linalg-named-op-conversion) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After EraseUnusedLinalgOperandsPass (iree-global-opt-erase-unused-linalg-operands) //----- // | |
#executable_target_embedded_elf_x86_64 = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>, max_stack_allocation_size = 32768 : i64, native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1) -> (d0, d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [#map, #map], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After ExpandTensorShapesPass (iree-global-opt-expand-tensor-shapes) //----- // | |
#executable_target_embedded_elf_x86_64 = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>, max_stack_allocation_size = 32768 : i64, native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1) -> (d0, d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [#map, #map], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After ConvertElementwiseToLinalgPass (convert-elementwise-to-linalg) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After RaiseSpecialOpsPass (iree-global-opt-raise-special-ops) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After DecomposeConcatPass (iree-global-opt-decompose-concat) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After GeneralizeLinalgNamedOpsPass (iree-global-opt-generalize-linalg-named-ops) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After FoldUnitExtentDimsPass (iree-dispatch-creation-fold-unit-extent-dims) //----- // | |
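// Every dimension here is dynamic, so there is nothing for unit-extent-dim folding
// to do and this stretch of global-optimization passes leaves the IR untouched.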
#executable_target_embedded_elf_x86_64 = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>, max_stack_allocation_size = 32768 : i64, native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1) -> (d0, d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [#map, #map], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After ConvertStridedContractionToContractionPass (iree-global-opt-convert-strided-contraction-to-contraction) //----- // | |
#executable_target_embedded_elf_x86_64 = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>, max_stack_allocation_size = 32768 : i64, native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1) -> (d0, d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [#map, #map], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After DemoteContractionInputsToBF16Pass (iree-global-opt-demote-contraction-inputs-to-bf16) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After CanonicalizePass (iree-flow-canonicalize) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After PropagateLinalgTransposePass (iree-global-opt-propagate-linalg-transpose) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After CanonicalizePass (iree-flow-canonicalize) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After GeneralizeLinalgNamedOpsPass (iree-global-opt-generalize-linalg-named-ops) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After GlobalLoopInvariantCodeMotionPass (iree-global-opt-loop-invariant-code-motion) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After CanonicalizePass (iree-flow-canonicalize) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After SimplifyGlobalAccessesPass (iree-util-simplify-global-accesses) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After ApplyPatternsPass (iree-util-apply-patterns) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After FoldGlobalsPass (iree-util-fold-globals) //----- // | |
#executable_target_embedded_elf_x86_64 = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>, max_stack_allocation_size = 32768 : i64, native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1) -> (d0, d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [#map, #map], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After IPOPass (iree-util-ipo) //----- // | |
#executable_target_embedded_elf_x86_64 = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>, max_stack_allocation_size = 32768 : i64, native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1) -> (d0, d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [#map, #map], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After OptimizeIntArithmeticPass (iree-util-optimize-int-arithmetic) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After CanonicalizePass (iree-flow-canonicalize) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After HoistIntoGlobalsPass (iree-util-hoist-into-globals) //----- // | |
#executable_target_embedded_elf_x86_64 = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>, max_stack_allocation_size = 32768 : i64, native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1) -> (d0, d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [#map, #map], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After JitGlobalsPass (iree-consteval-jit-globals) //----- // | |
#executable_target_embedded_elf_x86_64 = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>, max_stack_allocation_size = 32768 : i64, native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1) -> (d0, d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [#map, #map], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After CanonicalizePass (iree-flow-canonicalize) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After RaiseSpecialOpsPass (iree-global-opt-raise-special-ops) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After InjectTensorTracingPass (iree-flow-inject-tensor-tracing) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After TensorPadToTensorInsertSlicePass (iree-dispatch-creation-tensor-pad-to-tensor-insert-slice) //----- // | |
#executable_target_embedded_elf_x86_64 = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>, max_stack_allocation_size = 32768 : i64, native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1) -> (d0, d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [#map, #map], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After CanonicalizePass (iree-flow-canonicalize) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After SimplifyGlobalAccessesPass (iree-util-simplify-global-accesses) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After ApplyPatternsPass (iree-util-apply-patterns) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After FoldGlobalsPass (iree-util-fold-globals) //----- // | |
#executable_target_embedded_elf_x86_64 = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>, max_stack_allocation_size = 32768 : i64, native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1) -> (d0, d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64]> : !hal.device | |
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [#map, #map], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After FuseGlobalsPass (iree-util-fuse-globals) //----- // | |
#executable_target_embedded_elf_x86_64 = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>, max_stack_allocation_size = 32768 : i64, native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1) -> (d0, d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64]> : !hal.device | |
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [#map, #map], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After IPOPass (iree-util-ipo) //----- // | |
#executable_target_embedded_elf_x86_64 = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>, max_stack_allocation_size = 32768 : i64, native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1) -> (d0, d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64]> : !hal.device | |
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [#map, #map], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After FixedPointIteratorPass (iree-util-fixed-point-iterator) //----- // | |
#executable_target_embedded_elf_x86_64 = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>, max_stack_allocation_size = 32768 : i64, native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1) -> (d0, d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [#map, #map], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
} | |
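// Note: the iree.fixedpoint.iteration = 0 attribute carried by the earlier module
// dumps is gone here, indicating that the global-optimization fixed-point iterator
// converged after a single iteration. The dispatch-creation passes that follow
// (fusion preprocessing, canonicalization, CSE, elementwise fusion, reshape
// bubbling/sinking, split reduction, transpose propagation) leave this small
// matmul + elementwise function untouched; the repeated dumps differ only in
// whether the indexing maps are printed inline or through the #map alias, so the
// IR does not change materially again until dispatch regions are formed below.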
// -----// IR Dump After FusionPreprocessingPass (iree-dispatch-creation-fusion-preprocessing) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After CanonicalizePass (iree-flow-canonicalize) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After ElementwiseOpFusionPass (iree-dispatch-creation-elementwise-op-fusion) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After CanonicalizePass (iree-flow-canonicalize) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After BubbleUpExtractSlicesPass (iree-dispatch-creation-bubble-up-extract-slices) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After BubbleUpExpandShapesPass (iree-dispatch-creation-bubble-up-expand-shapes) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After BubbleUpExtractSlicesPass (iree-dispatch-creation-bubble-up-extract-slices) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After CanonicalizePass (iree-flow-canonicalize) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After ElementwiseOpFusionPass (iree-dispatch-creation-elementwise-op-fusion) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After CanonicalizePass (iree-flow-canonicalize) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After SinkReshapesPass (iree-dispatch-creation-sink-reshapes) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After CanonicalizePass (iree-flow-canonicalize) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After FuseMultiUseElementwiseProducerPass (iree-dispatch-creation-fuse-multi-use-elementwise-producer) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After CanonicalizePass (iree-flow-canonicalize) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After SplitReductionPass (iree-dispatch-creation-split-reduction-ops) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After TransposeGenericOpsPass (iree-dispatch-creation-transpose-generic-ops) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After HoistIntoGlobalsPass (iree-util-hoist-into-globals) //----- // | |
#executable_target_embedded_elf_x86_64 = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>, max_stack_allocation_size = 32768 : i64, native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1) -> (d0, d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [#map, #map], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After CanonicalizePass (iree-flow-canonicalize) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After FormScalarDispatchesPass (iree-dispatch-creation-form-scalar-dispatches) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%8 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%11 = arith.addf %in, %in : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<?x?xf32> | |
%10 = hal.tensor.export %9 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After FormDispatchRegionsPass (iree-dispatch-creation-form-dispatch-regions) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%8 = flow.dispatch.region -> (tensor<?x?xf32>{%0, %4}) { | |
%10 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%7 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%11 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%10 : tensor<?x?xf32>) outs(%6 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%12 = arith.addf %in, %in : f32 | |
linalg.yield %12 : f32 | |
} -> tensor<?x?xf32> | |
flow.return %11 : tensor<?x?xf32> | |
} | |
%9 = hal.tensor.export %8 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
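// Note: FormDispatchRegionsPass is the first structural change in this sequence.
// The linalg.matmul and its elementwise linalg.generic consumer are grouped into a
// single flow.dispatch.region yielding tensor<?x?xf32>{%0, %4}, while the
// tensor.empty and linalg.fill producers are still left at function scope at this
// point.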
// -----// IR Dump After CloneProducersIntoDispatchRegionsPass (iree-dispatch-creation-clone-producers-into-dispatch-regions) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = flow.dispatch.region -> (tensor<?x?xf32>{%0, %4}) { | |
%8 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%cst = arith.constant 0.000000e+00 : f32 | |
%9 = linalg.fill ins(%cst : f32) outs(%8 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%10 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%9 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%11 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%10 : tensor<?x?xf32>) outs(%8 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%12 = arith.addf %in, %in : f32 | |
linalg.yield %12 : f32 | |
} -> tensor<?x?xf32> | |
flow.return %11 : tensor<?x?xf32> | |
} | |
%7 = hal.tensor.export %6 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %7 : !hal.buffer_view | |
} | |
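// Note: CloneProducersIntoDispatchRegionsPass clones the tensor.empty, the zero
// constant, and the linalg.fill into the dispatch region, so the region now only
// captures the two imported operands and the result dimensions %0 and %4. This
// keeps the dispatch self-contained, presumably so that later bufferization and
// codegen do not depend on values produced outside the region.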
// -----// IR Dump After CollapseDimensionsPass (iree-dispatch-creation-collapse-dimensions) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = flow.dispatch.region -> (tensor<?x?xf32>{%0, %4}) { | |
%8 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%cst = arith.constant 0.000000e+00 : f32 | |
%9 = linalg.fill ins(%cst : f32) outs(%8 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%10 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%9 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%11 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%10 : tensor<?x?xf32>) outs(%8 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%12 = arith.addf %in, %in : f32 | |
linalg.yield %12 : f32 | |
} -> tensor<?x?xf32> | |
flow.return %11 : tensor<?x?xf32> | |
} | |
%7 = hal.tensor.export %6 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %7 : !hal.buffer_view | |
} | |
// -----// IR Dump After CanonicalizePass (iree-flow-canonicalize) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = flow.dispatch.region -> (tensor<?x?xf32>{%0, %4}) { | |
%8 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%cst = arith.constant 0.000000e+00 : f32 | |
%9 = linalg.fill ins(%cst : f32) outs(%8 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%10 = linalg.matmul ins(%2, %5 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%9 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
%11 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%10 : tensor<?x?xf32>) outs(%8 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%12 = arith.addf %in, %in : f32 | |
linalg.yield %12 : f32 | |
} -> tensor<?x?xf32> | |
flow.return %11 : tensor<?x?xf32> | |
} | |
%7 = hal.tensor.export %6 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %7 : !hal.buffer_view | |
} | |
// -----// IR Dump After SetEncodingPass (iree-dispatch-creation-set-encoding) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = flow.dispatch.region -> (tensor<?x?xf32>{%0, %4}) { | |
%8 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%cst = arith.constant 0.000000e+00 : f32 | |
%9 = iree_encoding.set_encoding %2 : tensor<?x?xf32> -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%10 = iree_encoding.set_encoding %5 : tensor<?x?xf32> -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%11 = tensor.empty(%0, %4) : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%12 = linalg.fill ins(%cst : f32) outs(%11 : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>) -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%13 = linalg.matmul ins(%9, %10 : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>, tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>) outs(%12 : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>) -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%14 = iree_encoding.unset_encoding %13 : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> -> tensor<?x?xf32>{%0, %4} | |
%15 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%14 : tensor<?x?xf32>) outs(%8 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%16 = arith.addf %in, %in : f32 | |
linalg.yield %16 : f32 | |
} -> tensor<?x?xf32> | |
flow.return %15 : tensor<?x?xf32> | |
} | |
%7 = hal.tensor.export %6 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %7 : !hal.buffer_view | |
} | |
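// Note: SetEncodingPass wraps the matmul operands and its fill-initialized
// accumulator in #iree_encoding.encoding attributes (operand_index 0/1/2,
// op_type = matmul, f32 element types, the matmul user_indexing_maps, and dynamic
// iteration_sizes [?, ?, ?]) and inserts iree_encoding.unset_encoding on the
// matmul result before the elementwise linalg.generic, which still operates on the
// plain tensor<?x?xf32>. The iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>
// attribute on the executable target suggests these encodings are resolved to
// concrete CPU data layouts later in the pipeline.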
// -----// IR Dump After HoistEncodingOpsPass (iree-dispatch-creation-hoist-encoding-ops) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = iree_encoding.set_encoding %2 : tensor<?x?xf32> -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%7 = iree_encoding.set_encoding %5 : tensor<?x?xf32> -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%8 = flow.dispatch.region -> (tensor<?x?xf32>{%0, %4}) { | |
%10 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%cst = arith.constant 0.000000e+00 : f32 | |
%11 = tensor.empty(%0, %4) : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%12 = linalg.fill ins(%cst : f32) outs(%11 : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>) -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%13 = linalg.matmul ins(%6, %7 : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>, tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>) outs(%12 : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>) -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%14 = iree_encoding.unset_encoding %13 : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> -> tensor<?x?xf32>{%0, %4} | |
%15 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%14 : tensor<?x?xf32>) outs(%10 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%16 = arith.addf %in, %in : f32 | |
linalg.yield %16 : f32 | |
} -> tensor<?x?xf32> | |
flow.return %15 : tensor<?x?xf32> | |
} | |
%9 = hal.tensor.export %8 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
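// Note: judging by the diff with the previous dump, HoistEncodingOpsPass moved the two
// encoding ops out of the flow.dispatch.region: %6 and %7 are now produced by
// iree_encoding.set_encoding at function scope and the region captures them directly.
// A simplified excerpt, with the long #iree_encoding.encoding<...> attributes
// abbreviated as #encoding / #encoding1 / #encoding2 (shorthand, not part of this dump):
//
//   %6 = iree_encoding.set_encoding %2 : tensor<?x?xf32> -> tensor<?x?xf32, #encoding>
//   %7 = iree_encoding.set_encoding %5 : tensor<?x?xf32> -> tensor<?x?xf32, #encoding1>
//   %8 = flow.dispatch.region -> (tensor<?x?xf32>{%0, %4}) {
//     %13 = linalg.matmul ins(%6, %7 : tensor<?x?xf32, #encoding>, tensor<?x?xf32, #encoding1>)
//             outs(%12 : tensor<?x?xf32, #encoding2>) -> tensor<?x?xf32, #encoding2>
//     ...
//   }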
// -----// IR Dump After PropagateEncodingsPass (iree-dispatch-creation-propagate-encodings) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = iree_encoding.set_encoding %2 : tensor<?x?xf32> -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%7 = iree_encoding.set_encoding %5 : tensor<?x?xf32> -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%8 = flow.dispatch.region -> (tensor<?x?xf32>{%0, %4}) { | |
%10 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%cst = arith.constant 0.000000e+00 : f32 | |
%11 = tensor.empty(%0, %4) : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%12 = linalg.fill ins(%cst : f32) outs(%11 : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>) -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%13 = linalg.matmul ins(%6, %7 : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>, tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>) outs(%12 : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>) -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%14 = iree_encoding.unset_encoding %13 : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> -> tensor<?x?xf32>{%0, %4} | |
%15 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%14 : tensor<?x?xf32>) outs(%10 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%16 = arith.addf %in, %in : f32 | |
linalg.yield %16 : f32 | |
} -> tensor<?x?xf32> | |
flow.return %15 : tensor<?x?xf32> | |
} | |
%9 = hal.tensor.export %8 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
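// Note: this dump appears identical to the previous one. For this program there are no
// intervening producers (reshapes, slices, etc.) between the hal.tensor.import ops and
// the set_encoding ops, so PropagateEncodingsPass seems to have nothing to propagate
// through and leaves the IR as-is.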
// -----// IR Dump After FuseEncodingOpsIntoDispatchRegionsPass (iree-dispatch-creation-fuse-encoding-ops-into-dispatch-regions-pass) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = iree_encoding.set_encoding %2 : tensor<?x?xf32> -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%7 = iree_encoding.set_encoding %5 : tensor<?x?xf32> -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%8 = flow.dispatch.region -> (tensor<?x?xf32>{%0, %4}) { | |
%10 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%cst = arith.constant 0.000000e+00 : f32 | |
%11 = tensor.empty(%0, %4) : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%12 = linalg.fill ins(%cst : f32) outs(%11 : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>) -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%13 = linalg.matmul ins(%6, %7 : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>, tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>) outs(%12 : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>) -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%14 = iree_encoding.unset_encoding %13 : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> -> tensor<?x?xf32>{%0, %4} | |
%15 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%14 : tensor<?x?xf32>) outs(%10 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%16 = arith.addf %in, %in : f32 | |
linalg.yield %16 : f32 | |
} -> tensor<?x?xf32> | |
flow.return %15 : tensor<?x?xf32> | |
} | |
%9 = hal.tensor.export %8 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
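// Note: again no visible change. After FuseEncodingOpsIntoDispatchRegionsPass the
// set_encoding ops remain at function scope; in this dump they are not pulled into the
// matmul dispatch region, so the encoding work stays outside the dispatch.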
// -----// IR Dump After ConvertEncodingToFlowPass (iree-dispatch-creation-convert-encoding-to-flow) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%c0 = arith.constant 0 : index | |
%dim = tensor.dim %2, %c0 : tensor<?x?xf32> | |
%c1 = arith.constant 1 : index | |
%dim_0 = tensor.dim %2, %c1 : tensor<?x?xf32> | |
%6 = flow.tensor.encode %2 : tensor<?x?xf32>{%dim, %dim_0} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%dim, %dim_0} | |
%c0_1 = arith.constant 0 : index | |
%dim_2 = tensor.dim %5, %c0_1 : tensor<?x?xf32> | |
%c1_3 = arith.constant 1 : index | |
%dim_4 = tensor.dim %5, %c1_3 : tensor<?x?xf32> | |
%7 = flow.tensor.encode %5 : tensor<?x?xf32>{%dim_2, %dim_4} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%dim_2, %dim_4} | |
%8 = flow.dispatch.region -> (tensor<?x?xf32>{%0, %4}) { | |
%10 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%cst = arith.constant 0.000000e+00 : f32 | |
%11 = tensor.empty(%0, %4) : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%12 = linalg.fill ins(%cst : f32) outs(%11 : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>) -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%13 = linalg.matmul ins(%6, %7 : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>, tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>) outs(%12 : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>) -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%14 = iree_encoding.unset_encoding %13 : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> -> tensor<?x?xf32>{%0, %4} | |
%15 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%14 : tensor<?x?xf32>) outs(%10 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%16 = arith.addf %in, %in : f32 | |
linalg.yield %16 : f32 | |
} -> tensor<?x?xf32> | |
flow.return %15 : tensor<?x?xf32> | |
} | |
%9 = hal.tensor.export %8 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
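// Note: ConvertEncodingToFlowPass rewrote the function-scope set_encoding ops into
// flow.tensor.encode ops, materializing the dynamic sizes with arith.constant /
// tensor.dim first. A simplified excerpt for the first operand (encoding attribute
// abbreviated as #encoding):
//
//   %dim   = tensor.dim %2, %c0 : tensor<?x?xf32>
//   %dim_0 = tensor.dim %2, %c1 : tensor<?x?xf32>
//   %6 = flow.tensor.encode %2 : tensor<?x?xf32>{%dim, %dim_0}
//          -> tensor<?x?xf32, #encoding>{%dim, %dim_0}
//
// The encoding ops inside the dispatch region (the encoded tensor.empty and the final
// unset_encoding on the matmul result) are left untouched.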
// -----// IR Dump After HoistIntoGlobalsPass (iree-util-hoist-into-globals) //----- // | |
#executable_target_embedded_elf_x86_64 = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>, max_stack_allocation_size = 32768 : i64, native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#map3 = affine_map<(d0, d1) -> (d0, d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64]> : !hal.device | |
#encoding = #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding1 = #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding2 = #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%c0 = arith.constant 0 : index | |
%dim = tensor.dim %2, %c0 : tensor<?x?xf32> | |
%c1 = arith.constant 1 : index | |
%dim_0 = tensor.dim %2, %c1 : tensor<?x?xf32> | |
%6 = flow.tensor.encode %2 : tensor<?x?xf32>{%dim, %dim_0} -> tensor<?x?xf32, #encoding>{%dim, %dim_0} | |
%c0_1 = arith.constant 0 : index | |
%dim_2 = tensor.dim %5, %c0_1 : tensor<?x?xf32> | |
%c1_3 = arith.constant 1 : index | |
%dim_4 = tensor.dim %5, %c1_3 : tensor<?x?xf32> | |
%7 = flow.tensor.encode %5 : tensor<?x?xf32>{%dim_2, %dim_4} -> tensor<?x?xf32, #encoding1>{%dim_2, %dim_4} | |
%8 = flow.dispatch.region -> (tensor<?x?xf32>{%0, %4}) { | |
%10 = tensor.empty(%0, %4) : tensor<?x?xf32> | |
%cst = arith.constant 0.000000e+00 : f32 | |
%11 = tensor.empty(%0, %4) : tensor<?x?xf32, #encoding2> | |
%12 = linalg.fill ins(%cst : f32) outs(%11 : tensor<?x?xf32, #encoding2>) -> tensor<?x?xf32, #encoding2> | |
%13 = linalg.matmul ins(%6, %7 : tensor<?x?xf32, #encoding>, tensor<?x?xf32, #encoding1>) outs(%12 : tensor<?x?xf32, #encoding2>) -> tensor<?x?xf32, #encoding2> | |
%14 = iree_encoding.unset_encoding %13 : tensor<?x?xf32, #encoding2> -> tensor<?x?xf32>{%0, %4} | |
%15 = linalg.generic {indexing_maps = [#map3, #map3], iterator_types = ["parallel", "parallel"]} ins(%14 : tensor<?x?xf32>) outs(%10 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%16 = arith.addf %in, %in : f32 | |
linalg.yield %16 : f32 | |
} -> tensor<?x?xf32> | |
flow.return %15 : tensor<?x?xf32> | |
} | |
%9 = hal.tensor.export %8 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
} | |
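// Note: this dump is printed at module scope, so the attribute aliases (#map*,
// #encoding*, #executable_target_embedded_elf_x86_64, #device_target_local) and the
// @__device_0 global become visible, and the per-op encoding attributes are shown in
// their abbreviated alias form, e.g.:
//
//   %13 = linalg.matmul ins(%6, %7 : tensor<?x?xf32, #encoding>, tensor<?x?xf32, #encoding1>)
//           outs(%12 : tensor<?x?xf32, #encoding2>) -> tensor<?x?xf32, #encoding2>
//
// No additional globals appear to have been hoisted for this function; there is no
// constant-foldable work in it.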
// -----// IR Dump After ConvertDispatchRegionsToWorkgroupsPass (iree-dispatch-creation-convert-dispatch-regions-to-workgroups) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%c0 = arith.constant 0 : index | |
%dim = tensor.dim %2, %c0 : tensor<?x?xf32> | |
%c1 = arith.constant 1 : index | |
%dim_0 = tensor.dim %2, %c1 : tensor<?x?xf32> | |
%6 = flow.tensor.encode %2 : tensor<?x?xf32>{%dim, %dim_0} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%dim, %dim_0} | |
%c0_1 = arith.constant 0 : index | |
%dim_2 = tensor.dim %5, %c0_1 : tensor<?x?xf32> | |
%c1_3 = arith.constant 1 : index | |
%dim_4 = tensor.dim %5, %c1_3 : tensor<?x?xf32> | |
%7 = flow.tensor.encode %5 : tensor<?x?xf32>{%dim_2, %dim_4} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%dim_2, %dim_4} | |
%8 = flow.dispatch.workgroups(%0, %4, %6, %7, %dim, %dim_0, %dim_2, %dim_4, %0, %4) : (index, index, tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%dim, %dim_0}, tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%dim_2, %dim_4}, index, index, index, index, index, index) -> tensor<?x?xf32>{%0, %4} = | |
(%arg2: index, %arg3: index, %arg4: !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>>, %arg5: !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>>, %arg6: index, %arg7: index, %arg8: index, %arg9: index, %arg10: index, %arg11: index, %arg12: !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>) { | |
%10 = iree_tensor_ext.dispatch.tensor.load %arg4, offsets = [0, 0], sizes = [%arg6, %arg7], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>>{%arg6, %arg7} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%11 = iree_tensor_ext.dispatch.tensor.load %arg5, offsets = [0, 0], sizes = [%arg8, %arg9], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>>{%arg8, %arg9} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%12 = tensor.empty(%arg10, %arg11) : tensor<?x?xf32> | |
%cst = arith.constant 0.000000e+00 : f32 | |
%13 = tensor.empty(%arg10, %arg11) : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%14 = linalg.fill ins(%cst : f32) outs(%13 : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>) -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%15 = linalg.matmul ins(%10, %11 : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>, tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>) outs(%14 : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>) -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%16 = iree_encoding.unset_encoding %15 : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> -> tensor<?x?xf32>{%arg10, %arg11} | |
%17 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%16 : tensor<?x?xf32>) outs(%12 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%18 = arith.addf %in, %in : f32 | |
linalg.yield %18 : f32 | |
} -> tensor<?x?xf32> | |
iree_tensor_ext.dispatch.tensor.store %17, %arg12, offsets = [0, 0], sizes = [%arg10, %arg11], strides = [1, 1] : tensor<?x?xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%arg10, %arg11} | |
flow.return | |
} | |
%9 = hal.tensor.export %8 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
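// Note: ConvertDispatchRegionsToWorkgroupsPass turned the flow.dispatch.region into a
// flow.dispatch.workgroups op. The values the region used to capture implicitly are now
// explicit operands, the encoded tensor inputs arrive as readonly
// !iree_tensor_ext.dispatch.tensor bindings read via iree_tensor_ext.dispatch.tensor.load,
// and the single tensor result is written to a writeonly binding (%arg12) before a plain
// flow.return, e.g. (wrapped for readability):
//
//   iree_tensor_ext.dispatch.tensor.store %17, %arg12, offsets = [0, 0],
//       sizes = [%arg10, %arg11], strides = [1, 1]
//       : tensor<?x?xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%arg10, %arg11}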
// -----// IR Dump After ConvertTensorToFlowPass (iree-dispatch-creation-convert-tensor-to-flow) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = flow.tensor.encode %2 : tensor<?x?xf32>{%0, %1} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%0, %1} | |
%7 = flow.tensor.encode %5 : tensor<?x?xf32>{%3, %4} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%3, %4} | |
%8 = flow.dispatch.workgroups(%0, %4, %6, %7, %0, %1, %3, %4, %0, %4) : (index, index, tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%0, %1}, tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%3, %4}, index, index, index, index, index, index) -> tensor<?x?xf32>{%0, %4} = | |
(%arg2: index, %arg3: index, %arg4: !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>>, %arg5: !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>>, %arg6: index, %arg7: index, %arg8: index, %arg9: index, %arg10: index, %arg11: index, %arg12: !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%10 = iree_tensor_ext.dispatch.tensor.load %arg4, offsets = [0, 0], sizes = [%arg6, %arg7], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>>{%arg6, %arg7} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%11 = iree_tensor_ext.dispatch.tensor.load %arg5, offsets = [0, 0], sizes = [%arg8, %arg9], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>>{%arg8, %arg9} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%12 = tensor.empty(%arg10, %arg11) : tensor<?x?xf32> | |
%13 = tensor.empty(%arg10, %arg11) : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%14 = linalg.fill ins(%cst : f32) outs(%13 : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>) -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%15 = linalg.matmul ins(%10, %11 : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>, tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>) outs(%14 : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>) -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%16 = iree_encoding.unset_encoding %15 : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> -> tensor<?x?xf32>{%arg10, %arg11} | |
%17 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%16 : tensor<?x?xf32>) outs(%12 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%18 = arith.addf %in, %in : f32 | |
linalg.yield %18 : f32 | |
} -> tensor<?x?xf32> | |
iree_tensor_ext.dispatch.tensor.store %17, %arg12, offsets = [0, 0], sizes = [%arg10, %arg11], strides = [1, 1] : tensor<?x?xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%arg10, %arg11} | |
flow.return | |
} | |
%9 = hal.tensor.export %8 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
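// Note: after ConvertTensorToFlowPass the redundant arith.constant / tensor.dim chain is
// gone; the flow.tensor.encode ops now take their dynamic sizes directly from the
// hal.buffer_view.dim results, e.g. (encoding attribute abbreviated as #encoding):
//
//   %6 = flow.tensor.encode %2 : tensor<?x?xf32>{%0, %1} -> tensor<?x?xf32, #encoding>{%0, %1}
//
// The dispatch itself is unchanged apart from the forwarded index operands.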
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = flow.tensor.encode %2 : tensor<?x?xf32>{%0, %1} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%0, %1} | |
%7 = flow.tensor.encode %5 : tensor<?x?xf32>{%3, %4} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%3, %4} | |
%8 = flow.dispatch.workgroups(%0, %4, %6, %7, %0, %1, %3, %4, %0, %4) : (index, index, tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%0, %1}, tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%3, %4}, index, index, index, index, index, index) -> tensor<?x?xf32>{%0, %4} = | |
(%arg2: index, %arg3: index, %arg4: !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>>, %arg5: !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>>, %arg6: index, %arg7: index, %arg8: index, %arg9: index, %arg10: index, %arg11: index, %arg12: !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%10 = iree_tensor_ext.dispatch.tensor.load %arg4, offsets = [0, 0], sizes = [%arg6, %arg7], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>>{%arg6, %arg7} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%11 = iree_tensor_ext.dispatch.tensor.load %arg5, offsets = [0, 0], sizes = [%arg8, %arg9], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>>{%arg8, %arg9} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%12 = tensor.empty(%arg10, %arg11) : tensor<?x?xf32> | |
%13 = tensor.empty(%arg10, %arg11) : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%14 = linalg.fill ins(%cst : f32) outs(%13 : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>) -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%15 = linalg.matmul ins(%10, %11 : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>, tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>) outs(%14 : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>) -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%16 = iree_encoding.unset_encoding %15 : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> -> tensor<?x?xf32>{%arg10, %arg11} | |
%17 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%16 : tensor<?x?xf32>) outs(%12 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%18 = arith.addf %in, %in : f32 | |
linalg.yield %18 : f32 | |
} -> tensor<?x?xf32> | |
iree_tensor_ext.dispatch.tensor.store %17, %arg12, offsets = [0, 0], sizes = [%arg10, %arg11], strides = [1, 1] : tensor<?x?xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%arg10, %arg11} | |
flow.return | |
} | |
%9 = hal.tensor.export %8 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
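// Note: the CSE dump appears identical to the previous one; within this function there
// seem to be no duplicate subexpressions left to eliminate at this point.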
// -----// IR Dump After CanonicalizePass (iree-flow-canonicalize) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = flow.tensor.encode %2 : tensor<?x?xf32>{%0, %1} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%0, %1} | |
%7 = flow.tensor.encode %5 : tensor<?x?xf32>{%3, %4} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%3, %4} | |
%8 = flow.dispatch.workgroups(%6, %7, %0, %1, %3, %4) : (tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%0, %1}, tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%3, %4}, index, index, index, index) -> tensor<?x?xf32>{%0, %4} = | |
(%arg2: !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>>, %arg3: !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>>, %arg4: index, %arg5: index, %arg6: index, %arg7: index, %arg8: !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%10 = iree_tensor_ext.dispatch.tensor.load %arg2, offsets = [0, 0], sizes = [%arg4, %arg5], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>>{%arg4, %arg5} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%11 = iree_tensor_ext.dispatch.tensor.load %arg3, offsets = [0, 0], sizes = [%arg6, %arg7], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>>{%arg6, %arg7} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%12 = tensor.empty(%arg4, %arg7) : tensor<?x?xf32> | |
%13 = tensor.empty(%arg4, %arg7) : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%14 = linalg.fill ins(%cst : f32) outs(%13 : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>) -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%15 = linalg.matmul ins(%10, %11 : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>, tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>) outs(%14 : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>) -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%16 = iree_encoding.unset_encoding %15 : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> -> tensor<?x?xf32>{%arg4, %arg7} | |
%17 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%16 : tensor<?x?xf32>) outs(%12 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%18 = arith.addf %in, %in : f32 | |
linalg.yield %18 : f32 | |
} -> tensor<?x?xf32> | |
iree_tensor_ext.dispatch.tensor.store %17, %arg8, offsets = [0, 0], sizes = [%arg4, %arg7], strides = [1, 1] : tensor<?x?xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%arg4, %arg7} | |
flow.return | |
} | |
%9 = hal.tensor.export %8 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
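// Note: iree-flow-canonicalize cleaned up the dispatch interface: the duplicated index
// operands are dropped, shrinking the flow.dispatch.workgroups operand list from ten
// values to (%6, %7, %0, %1, %3, %4), and the body now derives the result sizes from the
// remaining dimension arguments (%arg4, %arg7) instead of separate copies.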
// -----// IR Dump After MaterializeDefaultWorkgroupCountRegionPass (iree-dispatch-creation-materialize-default-workgroup-count-region) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = flow.tensor.encode %2 : tensor<?x?xf32>{%0, %1} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%0, %1} | |
%7 = flow.tensor.encode %5 : tensor<?x?xf32>{%3, %4} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%3, %4} | |
%8 = flow.dispatch.workgroups[%0, %1, %3, %4](%6, %7, %0, %1, %3, %4) : (tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%0, %1}, tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%3, %4}, index, index, index, index) -> tensor<?x?xf32>{%0, %4} = | |
(%arg2: !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>>, %arg3: !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>>, %arg4: index, %arg5: index, %arg6: index, %arg7: index, %arg8: !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>) { | |
%10 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 0 : index | |
%11 = iree_tensor_ext.dispatch.workload.ordinal %arg5, 1 : index | |
%12 = iree_tensor_ext.dispatch.workload.ordinal %arg6, 2 : index | |
%13 = iree_tensor_ext.dispatch.workload.ordinal %arg7, 3 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%14 = iree_tensor_ext.dispatch.tensor.load %arg2, offsets = [0, 0], sizes = [%10, %11], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>>{%10, %11} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%15 = iree_tensor_ext.dispatch.tensor.load %arg3, offsets = [0, 0], sizes = [%12, %13], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>>{%12, %13} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%16 = tensor.empty(%10, %13) : tensor<?x?xf32> | |
%17 = tensor.empty(%10, %13) : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%18 = linalg.fill ins(%cst : f32) outs(%17 : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>) -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%19 = linalg.matmul ins(%14, %15 : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>, tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>) outs(%18 : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>) -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%20 = iree_encoding.unset_encoding %19 : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> -> tensor<?x?xf32>{%10, %13} | |
%21 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%20 : tensor<?x?xf32>) outs(%16 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%22 = arith.addf %in, %in : f32 | |
linalg.yield %22 : f32 | |
} -> tensor<?x?xf32> | |
iree_tensor_ext.dispatch.tensor.store %21, %arg8, offsets = [0, 0], sizes = [%10, %13], strides = [1, 1] : tensor<?x?xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%10, %13} | |
flow.return | |
} count(%arg2: index, %arg3: index, %arg4: index, %arg5: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg2, %arg3, %arg4, %arg5 | |
flow.return %x, %y, %z : index, index, index | |
} | |
%9 = hal.tensor.export %8 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
// -----// IR Dump After VerifyInputLegalityPass (iree-verify-input-legality) //----- // | |
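// VerifyInputLegality is verification-only (it rejects any unconverted frontend-dialect ops); the IR below is unchanged, now printed at module scope with #map/#encoding attribute aliases.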
#executable_target_embedded_elf_x86_64 = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>, max_stack_allocation_size = 32768 : i64, native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#map3 = affine_map<(d0, d1) -> (d0, d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64]> : !hal.device | |
#encoding = #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding1 = #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding2 = #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = flow.tensor.encode %2 : tensor<?x?xf32>{%0, %1} -> tensor<?x?xf32, #encoding>{%0, %1} | |
%7 = flow.tensor.encode %5 : tensor<?x?xf32>{%3, %4} -> tensor<?x?xf32, #encoding1>{%3, %4} | |
%8 = flow.dispatch.workgroups[%0, %1, %3, %4](%6, %7, %0, %1, %3, %4) : (tensor<?x?xf32, #encoding>{%0, %1}, tensor<?x?xf32, #encoding1>{%3, %4}, index, index, index, index) -> tensor<?x?xf32>{%0, %4} = | |
(%arg2: !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>, %arg3: !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>, %arg4: index, %arg5: index, %arg6: index, %arg7: index, %arg8: !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>) { | |
%10 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 0 : index | |
%11 = iree_tensor_ext.dispatch.workload.ordinal %arg5, 1 : index | |
%12 = iree_tensor_ext.dispatch.workload.ordinal %arg6, 2 : index | |
%13 = iree_tensor_ext.dispatch.workload.ordinal %arg7, 3 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%14 = iree_tensor_ext.dispatch.tensor.load %arg2, offsets = [0, 0], sizes = [%10, %11], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%10, %11} -> tensor<?x?xf32, #encoding> | |
%15 = iree_tensor_ext.dispatch.tensor.load %arg3, offsets = [0, 0], sizes = [%12, %13], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%12, %13} -> tensor<?x?xf32, #encoding1> | |
%16 = tensor.empty(%10, %13) : tensor<?x?xf32> | |
%17 = tensor.empty(%10, %13) : tensor<?x?xf32, #encoding2> | |
%18 = linalg.fill ins(%cst : f32) outs(%17 : tensor<?x?xf32, #encoding2>) -> tensor<?x?xf32, #encoding2> | |
%19 = linalg.matmul ins(%14, %15 : tensor<?x?xf32, #encoding>, tensor<?x?xf32, #encoding1>) outs(%18 : tensor<?x?xf32, #encoding2>) -> tensor<?x?xf32, #encoding2> | |
%20 = iree_encoding.unset_encoding %19 : tensor<?x?xf32, #encoding2> -> tensor<?x?xf32>{%10, %13} | |
%21 = linalg.generic {indexing_maps = [#map3, #map3], iterator_types = ["parallel", "parallel"]} ins(%20 : tensor<?x?xf32>) outs(%16 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%22 = arith.addf %in, %in : f32 | |
linalg.yield %22 : f32 | |
} -> tensor<?x?xf32> | |
iree_tensor_ext.dispatch.tensor.store %21, %arg8, offsets = [0, 0], sizes = [%10, %13], strides = [1, 1] : tensor<?x?xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%10, %13} | |
flow.return | |
} count(%arg2: index, %arg3: index, %arg4: index, %arg5: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg2, %arg3, %arg4, %arg5 | |
flow.return %x, %y, %z : index, index, index | |
} | |
%9 = hal.tensor.export %8 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After InitializeEmptyTensorsPass (iree-flow-initialize-empty-tensors) //----- // | |
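// InitializeEmptyTensors rewrites tensor.empty ops left outside dispatch regions into flow tensor ops; every tensor.empty here already sits inside the dispatch region, so the function is unchanged.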
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = flow.tensor.encode %2 : tensor<?x?xf32>{%0, %1} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%0, %1} | |
%7 = flow.tensor.encode %5 : tensor<?x?xf32>{%3, %4} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%3, %4} | |
%8 = flow.dispatch.workgroups[%0, %1, %3, %4](%6, %7, %0, %1, %3, %4) : (tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%0, %1}, tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%3, %4}, index, index, index, index) -> tensor<?x?xf32>{%0, %4} = | |
(%arg2: !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>>, %arg3: !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>>, %arg4: index, %arg5: index, %arg6: index, %arg7: index, %arg8: !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%10 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 0 : index | |
%11 = iree_tensor_ext.dispatch.workload.ordinal %arg5, 1 : index | |
%12 = iree_tensor_ext.dispatch.workload.ordinal %arg6, 2 : index | |
%13 = iree_tensor_ext.dispatch.workload.ordinal %arg7, 3 : index | |
%14 = iree_tensor_ext.dispatch.tensor.load %arg2, offsets = [0, 0], sizes = [%10, %11], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>>{%10, %11} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%15 = iree_tensor_ext.dispatch.tensor.load %arg3, offsets = [0, 0], sizes = [%12, %13], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>>{%12, %13} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%16 = tensor.empty(%10, %13) : tensor<?x?xf32> | |
%17 = tensor.empty(%10, %13) : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%18 = linalg.fill ins(%cst : f32) outs(%17 : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>) -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%19 = linalg.matmul ins(%14, %15 : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>, tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>) outs(%18 : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>) -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%20 = iree_encoding.unset_encoding %19 : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> -> tensor<?x?xf32>{%10, %13} | |
%21 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%20 : tensor<?x?xf32>) outs(%16 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%22 = arith.addf %in, %in : f32 | |
linalg.yield %22 : f32 | |
} -> tensor<?x?xf32> | |
iree_tensor_ext.dispatch.tensor.store %21, %arg8, offsets = [0, 0], sizes = [%10, %13], strides = [1, 1] : tensor<?x?xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%10, %13} | |
flow.return | |
} count(%arg2: index, %arg3: index, %arg4: index, %arg5: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg2, %arg3, %arg4, %arg5 | |
flow.return %x, %y, %z : index, index, index | |
} | |
%9 = hal.tensor.export %8 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
// -----// IR Dump After CaptureDynamicDimsPass (iree-flow-capture-dynamic-dims) //----- // | |
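// CaptureDynamicDims makes the dynamic dimensions of the dispatch operands/results available inside the region: note the new flow.dispatch.tie_shape ops (%10, %11, %12) tying %arg2, %arg3, and %arg8 to the captured index arguments %arg4..%arg7.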
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = flow.tensor.encode %2 : tensor<?x?xf32>{%0, %1} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%0, %1} | |
%7 = flow.tensor.encode %5 : tensor<?x?xf32>{%3, %4} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%3, %4} | |
%8 = flow.dispatch.workgroups[%0, %1, %3, %4](%6, %7, %0, %1, %3, %4) : (tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%0, %1}, tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%3, %4}, index, index, index, index) -> tensor<?x?xf32>{%0, %4} = | |
(%arg2: !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>>, %arg3: !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>>, %arg4: index, %arg5: index, %arg6: index, %arg7: index, %arg8: !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>) { | |
%10 = flow.dispatch.tie_shape %arg2 : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>>{%arg4, %arg5} | |
%11 = flow.dispatch.tie_shape %arg3 : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>>{%arg6, %arg7} | |
%12 = flow.dispatch.tie_shape %arg8 : !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%arg4, %arg7} | |
%cst = arith.constant 0.000000e+00 : f32 | |
%13 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 0 : index | |
%14 = iree_tensor_ext.dispatch.workload.ordinal %arg5, 1 : index | |
%15 = iree_tensor_ext.dispatch.workload.ordinal %arg6, 2 : index | |
%16 = iree_tensor_ext.dispatch.workload.ordinal %arg7, 3 : index | |
%17 = iree_tensor_ext.dispatch.tensor.load %10, offsets = [0, 0], sizes = [%13, %14], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>>{%13, %14} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%18 = iree_tensor_ext.dispatch.tensor.load %11, offsets = [0, 0], sizes = [%15, %16], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>>{%15, %16} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%19 = tensor.empty(%13, %16) : tensor<?x?xf32> | |
%20 = tensor.empty(%13, %16) : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%21 = linalg.fill ins(%cst : f32) outs(%20 : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>) -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%22 = linalg.matmul ins(%17, %18 : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>, tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>) outs(%21 : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>) -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%23 = iree_encoding.unset_encoding %22 : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> -> tensor<?x?xf32>{%13, %16} | |
%24 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%23 : tensor<?x?xf32>) outs(%19 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%25 = arith.addf %in, %in : f32 | |
linalg.yield %25 : f32 | |
} -> tensor<?x?xf32> | |
iree_tensor_ext.dispatch.tensor.store %24, %12, offsets = [0, 0], sizes = [%13, %16], strides = [1, 1] : tensor<?x?xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%13, %16} | |
flow.return | |
} count(%arg2: index, %arg3: index, %arg4: index, %arg5: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg2, %arg3, %arg4, %arg5 | |
flow.return %x, %y, %z : index, index, index | |
} | |
%9 = hal.tensor.export %8 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
// -----// IR Dump After CanonicalizePass (iree-flow-canonicalize) //----- // | |
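// Canonicalization hoists the f32 constant to the top of the region, moves the workload.ordinal ops ahead of the tie_shape ops, and rewrites the tie_shape dynamic dims to use the ordinal results (%10..%13) instead of the raw block arguments.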
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = flow.tensor.encode %2 : tensor<?x?xf32>{%0, %1} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%0, %1} | |
%7 = flow.tensor.encode %5 : tensor<?x?xf32>{%3, %4} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%3, %4} | |
%8 = flow.dispatch.workgroups[%0, %1, %3, %4](%6, %7, %0, %1, %3, %4) : (tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%0, %1}, tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%3, %4}, index, index, index, index) -> tensor<?x?xf32>{%0, %4} = | |
(%arg2: !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>>, %arg3: !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>>, %arg4: index, %arg5: index, %arg6: index, %arg7: index, %arg8: !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%10 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 0 : index | |
%11 = iree_tensor_ext.dispatch.workload.ordinal %arg5, 1 : index | |
%12 = iree_tensor_ext.dispatch.workload.ordinal %arg6, 2 : index | |
%13 = iree_tensor_ext.dispatch.workload.ordinal %arg7, 3 : index | |
%14 = flow.dispatch.tie_shape %arg2 : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>>{%10, %11} | |
%15 = flow.dispatch.tie_shape %arg3 : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>>{%12, %13} | |
%16 = flow.dispatch.tie_shape %arg8 : !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%10, %13} | |
%17 = iree_tensor_ext.dispatch.tensor.load %14, offsets = [0, 0], sizes = [%10, %11], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>>{%10, %11} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%18 = iree_tensor_ext.dispatch.tensor.load %15, offsets = [0, 0], sizes = [%12, %13], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>>{%12, %13} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%19 = tensor.empty(%10, %13) : tensor<?x?xf32> | |
%20 = tensor.empty(%10, %13) : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%21 = linalg.fill ins(%cst : f32) outs(%20 : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>) -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%22 = linalg.matmul ins(%17, %18 : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>, tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>) outs(%21 : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>) -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%23 = iree_encoding.unset_encoding %22 : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> -> tensor<?x?xf32>{%10, %13} | |
%24 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%23 : tensor<?x?xf32>) outs(%19 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%25 = arith.addf %in, %in : f32 | |
linalg.yield %25 : f32 | |
} -> tensor<?x?xf32> | |
iree_tensor_ext.dispatch.tensor.store %24, %16, offsets = [0, 0], sizes = [%10, %13], strides = [1, 1] : tensor<?x?xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%10, %13} | |
flow.return | |
} count(%arg2: index, %arg3: index, %arg4: index, %arg5: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg2, %arg3, %arg4, %arg5 | |
flow.return %x, %y, %z : index, index, index | |
} | |
%9 = hal.tensor.export %8 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
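// CSE finds no redundant subexpressions in this function; the IR is unchanged from the previous dump.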
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = flow.tensor.encode %2 : tensor<?x?xf32>{%0, %1} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%0, %1} | |
%7 = flow.tensor.encode %5 : tensor<?x?xf32>{%3, %4} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%3, %4} | |
%8 = flow.dispatch.workgroups[%0, %1, %3, %4](%6, %7, %0, %1, %3, %4) : (tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%0, %1}, tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%3, %4}, index, index, index, index) -> tensor<?x?xf32>{%0, %4} = | |
(%arg2: !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>>, %arg3: !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>>, %arg4: index, %arg5: index, %arg6: index, %arg7: index, %arg8: !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%10 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 0 : index | |
%11 = iree_tensor_ext.dispatch.workload.ordinal %arg5, 1 : index | |
%12 = iree_tensor_ext.dispatch.workload.ordinal %arg6, 2 : index | |
%13 = iree_tensor_ext.dispatch.workload.ordinal %arg7, 3 : index | |
%14 = flow.dispatch.tie_shape %arg2 : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>>{%10, %11} | |
%15 = flow.dispatch.tie_shape %arg3 : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>>{%12, %13} | |
%16 = flow.dispatch.tie_shape %arg8 : !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%10, %13} | |
%17 = iree_tensor_ext.dispatch.tensor.load %14, offsets = [0, 0], sizes = [%10, %11], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>>{%10, %11} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%18 = iree_tensor_ext.dispatch.tensor.load %15, offsets = [0, 0], sizes = [%12, %13], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>>{%12, %13} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%19 = tensor.empty(%10, %13) : tensor<?x?xf32> | |
%20 = tensor.empty(%10, %13) : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%21 = linalg.fill ins(%cst : f32) outs(%20 : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>) -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%22 = linalg.matmul ins(%17, %18 : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>, tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>) outs(%21 : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>) -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%23 = iree_encoding.unset_encoding %22 : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> -> tensor<?x?xf32>{%10, %13} | |
%24 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%23 : tensor<?x?xf32>) outs(%19 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%25 = arith.addf %in, %in : f32 | |
linalg.yield %25 : f32 | |
} -> tensor<?x?xf32> | |
iree_tensor_ext.dispatch.tensor.store %24, %16, offsets = [0, 0], sizes = [%10, %13], strides = [1, 1] : tensor<?x?xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%10, %13} | |
flow.return | |
} count(%arg2: index, %arg3: index, %arg4: index, %arg5: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg2, %arg3, %arg4, %arg5 | |
flow.return %x, %y, %z : index, index, index | |
} | |
%9 = hal.tensor.export %8 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
// -----// IR Dump After OutlineDispatchExternsPass (iree-flow-outline-dispatch-externs) //----- // | |
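// OutlineDispatchExterns only affects hal.dispatch.extern ops; there are none in this module, so the IR is unchanged (printed again at module scope).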
#executable_target_embedded_elf_x86_64 = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>, max_stack_allocation_size = 32768 : i64, native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#map3 = affine_map<(d0, d1) -> (d0, d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64]> : !hal.device | |
#encoding = #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding1 = #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding2 = #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = flow.tensor.encode %2 : tensor<?x?xf32>{%0, %1} -> tensor<?x?xf32, #encoding>{%0, %1} | |
%7 = flow.tensor.encode %5 : tensor<?x?xf32>{%3, %4} -> tensor<?x?xf32, #encoding1>{%3, %4} | |
%8 = flow.dispatch.workgroups[%0, %1, %3, %4](%6, %7, %0, %1, %3, %4) : (tensor<?x?xf32, #encoding>{%0, %1}, tensor<?x?xf32, #encoding1>{%3, %4}, index, index, index, index) -> tensor<?x?xf32>{%0, %4} = | |
(%arg2: !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>, %arg3: !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>, %arg4: index, %arg5: index, %arg6: index, %arg7: index, %arg8: !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%10 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 0 : index | |
%11 = iree_tensor_ext.dispatch.workload.ordinal %arg5, 1 : index | |
%12 = iree_tensor_ext.dispatch.workload.ordinal %arg6, 2 : index | |
%13 = iree_tensor_ext.dispatch.workload.ordinal %arg7, 3 : index | |
%14 = flow.dispatch.tie_shape %arg2 : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%10, %11} | |
%15 = flow.dispatch.tie_shape %arg3 : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%12, %13} | |
%16 = flow.dispatch.tie_shape %arg8 : !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%10, %13} | |
%17 = iree_tensor_ext.dispatch.tensor.load %14, offsets = [0, 0], sizes = [%10, %11], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%10, %11} -> tensor<?x?xf32, #encoding> | |
%18 = iree_tensor_ext.dispatch.tensor.load %15, offsets = [0, 0], sizes = [%12, %13], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%12, %13} -> tensor<?x?xf32, #encoding1> | |
%19 = tensor.empty(%10, %13) : tensor<?x?xf32> | |
%20 = tensor.empty(%10, %13) : tensor<?x?xf32, #encoding2> | |
%21 = linalg.fill ins(%cst : f32) outs(%20 : tensor<?x?xf32, #encoding2>) -> tensor<?x?xf32, #encoding2> | |
%22 = linalg.matmul ins(%17, %18 : tensor<?x?xf32, #encoding>, tensor<?x?xf32, #encoding1>) outs(%21 : tensor<?x?xf32, #encoding2>) -> tensor<?x?xf32, #encoding2> | |
%23 = iree_encoding.unset_encoding %22 : tensor<?x?xf32, #encoding2> -> tensor<?x?xf32>{%10, %13} | |
%24 = linalg.generic {indexing_maps = [#map3, #map3], iterator_types = ["parallel", "parallel"]} ins(%23 : tensor<?x?xf32>) outs(%19 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%25 = arith.addf %in, %in : f32 | |
linalg.yield %25 : f32 | |
} -> tensor<?x?xf32> | |
iree_tensor_ext.dispatch.tensor.store %24, %16, offsets = [0, 0], sizes = [%10, %13], strides = [1, 1] : tensor<?x?xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%10, %13} | |
flow.return | |
} count(%arg2: index, %arg3: index, %arg4: index, %arg5: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg2, %arg3, %arg4, %arg5 | |
flow.return %x, %y, %z : index, index, index | |
} | |
%9 = hal.tensor.export %8 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After OutlineDispatchRegionsPass (iree-flow-outline-dispatch-regions) //----- // | |
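// OutlineDispatchRegions moves the body of the flow.dispatch.workgroups op into a new flow.executable (@foo_dispatch_0) whose public export carries the workgroup-count region, and replaces the inline op with a flow.dispatch call to that export.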
#executable_target_embedded_elf_x86_64 = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>, max_stack_allocation_size = 32768 : i64, native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#map3 = affine_map<(d0, d1) -> (d0, d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64]> : !hal.device | |
#encoding = #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding1 = #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding2 = #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
flow.executable private @foo_dispatch_0 { | |
flow.executable.export public @foo_dispatch_0 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @foo_dispatch_0(%arg0: !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>, %arg1: !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>, %arg2: index, %arg3: index, %arg4: index, %arg5: index, %arg6: !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg5, 3 : index | |
%4 = flow.dispatch.tie_shape %arg0 : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} | |
%5 = flow.dispatch.tie_shape %arg1 : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = flow.dispatch.tie_shape %arg6 : !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
%7 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} -> tensor<?x?xf32, #encoding> | |
%8 = iree_tensor_ext.dispatch.tensor.load %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} -> tensor<?x?xf32, #encoding1> | |
%9 = tensor.empty(%0, %3) : tensor<?x?xf32> | |
%10 = tensor.empty(%0, %3) : tensor<?x?xf32, #encoding2> | |
%11 = linalg.fill ins(%cst : f32) outs(%10 : tensor<?x?xf32, #encoding2>) -> tensor<?x?xf32, #encoding2> | |
%12 = linalg.matmul ins(%7, %8 : tensor<?x?xf32, #encoding>, tensor<?x?xf32, #encoding1>) outs(%11 : tensor<?x?xf32, #encoding2>) -> tensor<?x?xf32, #encoding2> | |
%13 = iree_encoding.unset_encoding %12 : tensor<?x?xf32, #encoding2> -> tensor<?x?xf32>{%0, %3} | |
%14 = linalg.generic {indexing_maps = [#map3, #map3], iterator_types = ["parallel", "parallel"]} ins(%13 : tensor<?x?xf32>) outs(%9 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%15 = arith.addf %in, %in : f32 | |
linalg.yield %15 : f32 | |
} -> tensor<?x?xf32> | |
iree_tensor_ext.dispatch.tensor.store %14, %6, offsets = [0, 0], sizes = [%0, %3], strides = [1, 1] : tensor<?x?xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
return | |
} | |
} | |
} | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = flow.tensor.encode %2 : tensor<?x?xf32>{%0, %1} -> tensor<?x?xf32, #encoding>{%0, %1} | |
%7 = flow.tensor.encode %5 : tensor<?x?xf32>{%3, %4} -> tensor<?x?xf32, #encoding1>{%3, %4} | |
%8 = flow.dispatch @foo_dispatch_0::@foo_dispatch_0[%0, %1, %3, %4](%6, %7, %0, %1, %3, %4) : (tensor<?x?xf32, #encoding>{%0, %1}, tensor<?x?xf32, #encoding1>{%3, %4}, index, index, index, index) -> tensor<?x?xf32>{%0, %4} | |
%9 = hal.tensor.export %8 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After AnnotateDispatchesPass (iree-flow-annotate-dispatches) //----- // | |
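// AnnotateDispatches gives each dispatch export a descriptive name summarizing its root op: @foo_dispatch_0 becomes @foo_dispatch_0_matmul_DxDxD_f32 (matmul with three dynamic dims, f32 operands), and the flow.dispatch call site is updated to match.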
#executable_target_embedded_elf_x86_64 = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>, max_stack_allocation_size = 32768 : i64, native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#map3 = affine_map<(d0, d1) -> (d0, d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64]> : !hal.device | |
#encoding = #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding1 = #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding2 = #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
flow.executable private @foo_dispatch_0 { | |
flow.executable.export public @foo_dispatch_0_matmul_DxDxD_f32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @foo_dispatch_0_matmul_DxDxD_f32(%arg0: !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>, %arg1: !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>, %arg2: index, %arg3: index, %arg4: index, %arg5: index, %arg6: !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg5, 3 : index | |
%4 = flow.dispatch.tie_shape %arg0 : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} | |
%5 = flow.dispatch.tie_shape %arg1 : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = flow.dispatch.tie_shape %arg6 : !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
%7 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} -> tensor<?x?xf32, #encoding> | |
%8 = iree_tensor_ext.dispatch.tensor.load %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} -> tensor<?x?xf32, #encoding1> | |
%9 = tensor.empty(%0, %3) : tensor<?x?xf32> | |
%10 = tensor.empty(%0, %3) : tensor<?x?xf32, #encoding2> | |
%11 = linalg.fill ins(%cst : f32) outs(%10 : tensor<?x?xf32, #encoding2>) -> tensor<?x?xf32, #encoding2> | |
%12 = linalg.matmul ins(%7, %8 : tensor<?x?xf32, #encoding>, tensor<?x?xf32, #encoding1>) outs(%11 : tensor<?x?xf32, #encoding2>) -> tensor<?x?xf32, #encoding2> | |
%13 = iree_encoding.unset_encoding %12 : tensor<?x?xf32, #encoding2> -> tensor<?x?xf32>{%0, %3} | |
%14 = linalg.generic {indexing_maps = [#map3, #map3], iterator_types = ["parallel", "parallel"]} ins(%13 : tensor<?x?xf32>) outs(%9 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%15 = arith.addf %in, %in : f32 | |
linalg.yield %15 : f32 | |
} -> tensor<?x?xf32> | |
iree_tensor_ext.dispatch.tensor.store %14, %6, offsets = [0, 0], sizes = [%0, %3], strides = [1, 1] : tensor<?x?xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
return | |
} | |
} | |
} | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = flow.tensor.encode %2 : tensor<?x?xf32>{%0, %1} -> tensor<?x?xf32, #encoding>{%0, %1} | |
%7 = flow.tensor.encode %5 : tensor<?x?xf32>{%3, %4} -> tensor<?x?xf32, #encoding1>{%3, %4} | |
%8 = flow.dispatch @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %3, %4](%6, %7, %0, %1, %3, %4) : (tensor<?x?xf32, #encoding>{%0, %1}, tensor<?x?xf32, #encoding1>{%3, %4}, index, index, index, index) -> tensor<?x?xf32>{%0, %4} | |
%9 = hal.tensor.export %8 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After CanonicalizePass (iree-flow-canonicalize) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = flow.tensor.encode %2 : tensor<?x?xf32>{%0, %1} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%0, %1} | |
%7 = flow.tensor.encode %5 : tensor<?x?xf32>{%3, %4} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%3, %4} | |
%8 = flow.dispatch @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %3, %4](%6, %7, %0, %1, %3, %4) : (tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%0, %1}, tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%3, %4}, index, index, index, index) -> tensor<?x?xf32>{%0, %4} | |
%9 = hal.tensor.export %8 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
// -----// IR Dump After StripDebugOpsPass (iree-util-strip-debug-ops) //----- // | |
flow.executable private @foo_dispatch_0 { | |
flow.executable.export public @foo_dispatch_0_matmul_DxDxD_f32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @foo_dispatch_0_matmul_DxDxD_f32(%arg0: !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>>, %arg1: !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>>, %arg2: index, %arg3: index, %arg4: index, %arg5: index, %arg6: !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg5, 3 : index | |
%4 = flow.dispatch.tie_shape %arg0 : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>>{%0, %1} | |
%5 = flow.dispatch.tie_shape %arg1 : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>>{%2, %3} | |
%6 = flow.dispatch.tie_shape %arg6 : !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
%7 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>>{%0, %1} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%8 = iree_tensor_ext.dispatch.tensor.load %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>>{%2, %3} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%9 = tensor.empty(%0, %3) : tensor<?x?xf32> | |
%10 = tensor.empty(%0, %3) : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%11 = linalg.fill ins(%cst : f32) outs(%10 : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>) -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%12 = linalg.matmul ins(%7, %8 : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>, tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>) outs(%11 : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>) -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%13 = iree_encoding.unset_encoding %12 : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> -> tensor<?x?xf32>{%0, %3} | |
%14 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%13 : tensor<?x?xf32>) outs(%9 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%15 = arith.addf %in, %in : f32 | |
linalg.yield %15 : f32 | |
} -> tensor<?x?xf32> | |
iree_tensor_ext.dispatch.tensor.store %14, %6, offsets = [0, 0], sizes = [%0, %3], strides = [1, 1] : tensor<?x?xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
return | |
} | |
} | |
} | |
// -----// IR Dump After DeduplicateExecutablesPass (iree-flow-deduplicate-executables) //----- // | |
#executable_target_embedded_elf_x86_64 = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>, max_stack_allocation_size = 32768 : i64, native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#map3 = affine_map<(d0, d1) -> (d0, d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64]> : !hal.device | |
#encoding = #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding1 = #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding2 = #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
flow.executable private @foo_dispatch_0 { | |
flow.executable.export public @foo_dispatch_0_matmul_DxDxD_f32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @foo_dispatch_0_matmul_DxDxD_f32(%arg0: !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>, %arg1: !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>, %arg2: index, %arg3: index, %arg4: index, %arg5: index, %arg6: !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg5, 3 : index | |
%4 = flow.dispatch.tie_shape %arg0 : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} | |
%5 = flow.dispatch.tie_shape %arg1 : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = flow.dispatch.tie_shape %arg6 : !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
%7 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} -> tensor<?x?xf32, #encoding> | |
%8 = iree_tensor_ext.dispatch.tensor.load %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} -> tensor<?x?xf32, #encoding1> | |
%9 = tensor.empty(%0, %3) : tensor<?x?xf32> | |
%10 = tensor.empty(%0, %3) : tensor<?x?xf32, #encoding2> | |
%11 = linalg.fill ins(%cst : f32) outs(%10 : tensor<?x?xf32, #encoding2>) -> tensor<?x?xf32, #encoding2> | |
%12 = linalg.matmul ins(%7, %8 : tensor<?x?xf32, #encoding>, tensor<?x?xf32, #encoding1>) outs(%11 : tensor<?x?xf32, #encoding2>) -> tensor<?x?xf32, #encoding2> | |
%13 = iree_encoding.unset_encoding %12 : tensor<?x?xf32, #encoding2> -> tensor<?x?xf32>{%0, %3} | |
%14 = linalg.generic {indexing_maps = [#map3, #map3], iterator_types = ["parallel", "parallel"]} ins(%13 : tensor<?x?xf32>) outs(%9 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%15 = arith.addf %in, %in : f32 | |
linalg.yield %15 : f32 | |
} -> tensor<?x?xf32> | |
iree_tensor_ext.dispatch.tensor.store %14, %6, offsets = [0, 0], sizes = [%0, %3], strides = [1, 1] : tensor<?x?xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
return | |
} | |
} | |
} | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = flow.tensor.encode %2 : tensor<?x?xf32>{%0, %1} -> tensor<?x?xf32, #encoding>{%0, %1} | |
%7 = flow.tensor.encode %5 : tensor<?x?xf32>{%3, %4} -> tensor<?x?xf32, #encoding1>{%3, %4} | |
%8 = flow.dispatch @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %3, %4](%6, %7, %0, %1, %3, %4) : (tensor<?x?xf32, #encoding>{%0, %1}, tensor<?x?xf32, #encoding1>{%3, %4}, index, index, index, index) -> tensor<?x?xf32>{%0, %4} | |
%9 = hal.tensor.export %8 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After InjectTensorTracingPass (iree-flow-inject-tensor-tracing) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = flow.tensor.encode %2 : tensor<?x?xf32>{%0, %1} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%0, %1} | |
%7 = flow.tensor.encode %5 : tensor<?x?xf32>{%3, %4} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%3, %4} | |
%8 = flow.dispatch @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %3, %4](%6, %7, %0, %1, %3, %4) : (tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%0, %1}, tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%3, %4}, index, index, index, index) -> tensor<?x?xf32>{%0, %4} | |
%9 = hal.tensor.export %8 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
// -----// IR Dump After CleanupTensorShapesPass (iree-flow-cleanup-tensor-shapes) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = flow.tensor.encode %2 : tensor<?x?xf32>{%0, %1} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%0, %1} | |
%7 = flow.tensor.encode %5 : tensor<?x?xf32>{%3, %4} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%3, %4} | |
%8 = flow.dispatch @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %3, %4](%6, %7, %0, %1, %3, %4) : (tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%0, %1}, tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%3, %4}, index, index, index, index) -> tensor<?x?xf32>{%0, %4} | |
%9 = hal.tensor.export %8 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
// -----// IR Dump After OutlineConstantsPass (iree-flow-outline-constants) //----- // | |
#executable_target_embedded_elf_x86_64 = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>, max_stack_allocation_size = 32768 : i64, native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#map3 = affine_map<(d0, d1) -> (d0, d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64]> : !hal.device | |
#encoding = #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding1 = #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding2 = #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
flow.executable private @foo_dispatch_0 { | |
flow.executable.export public @foo_dispatch_0_matmul_DxDxD_f32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @foo_dispatch_0_matmul_DxDxD_f32(%arg0: !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>, %arg1: !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>, %arg2: index, %arg3: index, %arg4: index, %arg5: index, %arg6: !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg5, 3 : index | |
%4 = flow.dispatch.tie_shape %arg0 : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} | |
%5 = flow.dispatch.tie_shape %arg1 : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = flow.dispatch.tie_shape %arg6 : !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
%7 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} -> tensor<?x?xf32, #encoding> | |
%8 = iree_tensor_ext.dispatch.tensor.load %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} -> tensor<?x?xf32, #encoding1> | |
%9 = tensor.empty(%0, %3) : tensor<?x?xf32> | |
%10 = tensor.empty(%0, %3) : tensor<?x?xf32, #encoding2> | |
%11 = linalg.fill ins(%cst : f32) outs(%10 : tensor<?x?xf32, #encoding2>) -> tensor<?x?xf32, #encoding2> | |
%12 = linalg.matmul ins(%7, %8 : tensor<?x?xf32, #encoding>, tensor<?x?xf32, #encoding1>) outs(%11 : tensor<?x?xf32, #encoding2>) -> tensor<?x?xf32, #encoding2> | |
%13 = iree_encoding.unset_encoding %12 : tensor<?x?xf32, #encoding2> -> tensor<?x?xf32>{%0, %3} | |
%14 = linalg.generic {indexing_maps = [#map3, #map3], iterator_types = ["parallel", "parallel"]} ins(%13 : tensor<?x?xf32>) outs(%9 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%15 = arith.addf %in, %in : f32 | |
linalg.yield %15 : f32 | |
} -> tensor<?x?xf32> | |
iree_tensor_ext.dispatch.tensor.store %14, %6, offsets = [0, 0], sizes = [%0, %3], strides = [1, 1] : tensor<?x?xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
return | |
} | |
} | |
} | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = flow.tensor.encode %2 : tensor<?x?xf32>{%0, %1} -> tensor<?x?xf32, #encoding>{%0, %1} | |
%7 = flow.tensor.encode %5 : tensor<?x?xf32>{%3, %4} -> tensor<?x?xf32, #encoding1>{%3, %4} | |
%8 = flow.dispatch @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %3, %4](%6, %7, %0, %1, %3, %4) : (tensor<?x?xf32, #encoding>{%0, %1}, tensor<?x?xf32, #encoding1>{%3, %4}, index, index, index, index) -> tensor<?x?xf32>{%0, %4} | |
%9 = hal.tensor.export %8 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After OptimizeIntArithmeticPass (iree-util-optimize-int-arithmetic) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = flow.tensor.encode %2 : tensor<?x?xf32>{%0, %1} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%0, %1} | |
%7 = flow.tensor.encode %5 : tensor<?x?xf32>{%3, %4} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%3, %4} | |
%8 = flow.dispatch @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %3, %4](%6, %7, %0, %1, %3, %4) : (tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%0, %1}, tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%3, %4}, index, index, index, index) -> tensor<?x?xf32>{%0, %4} | |
%9 = hal.tensor.export %8 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
// -----// IR Dump After CanonicalizePass (iree-flow-canonicalize) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = flow.tensor.encode %2 : tensor<?x?xf32>{%0, %1} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%0, %1} | |
%7 = flow.tensor.encode %5 : tensor<?x?xf32>{%3, %4} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%3, %4} | |
%8 = flow.dispatch @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %3, %4](%6, %7, %0, %1, %3, %4) : (tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%0, %1}, tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%3, %4}, index, index, index, index) -> tensor<?x?xf32>{%0, %4} | |
%9 = hal.tensor.export %8 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = flow.tensor.encode %2 : tensor<?x?xf32>{%0, %1} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%0, %1} | |
%7 = flow.tensor.encode %5 : tensor<?x?xf32>{%3, %4} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%3, %4} | |
%8 = flow.dispatch @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %3, %4](%6, %7, %0, %1, %3, %4) : (tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%0, %1}, tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%3, %4}, index, index, index, index) -> tensor<?x?xf32>{%0, %4} | |
%9 = hal.tensor.export %8 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
// -----// IR Dump After SimplifyGlobalAccessesPass (iree-util-simplify-global-accesses) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = flow.tensor.encode %2 : tensor<?x?xf32>{%0, %1} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%0, %1} | |
%7 = flow.tensor.encode %5 : tensor<?x?xf32>{%3, %4} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%3, %4} | |
%8 = flow.dispatch @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %3, %4](%6, %7, %0, %1, %3, %4) : (tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%0, %1}, tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%3, %4}, index, index, index, index) -> tensor<?x?xf32>{%0, %4} | |
%9 = hal.tensor.export %8 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
// -----// IR Dump After ApplyPatternsPass (iree-util-apply-patterns) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = flow.tensor.encode %2 : tensor<?x?xf32>{%0, %1} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%0, %1} | |
%7 = flow.tensor.encode %5 : tensor<?x?xf32>{%3, %4} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%3, %4} | |
%8 = flow.dispatch @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %3, %4](%6, %7, %0, %1, %3, %4) : (tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%0, %1}, tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%3, %4}, index, index, index, index) -> tensor<?x?xf32>{%0, %4} | |
%9 = hal.tensor.export %8 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
// -----// IR Dump After FoldGlobalsPass (iree-util-fold-globals) //----- // | |
#executable_target_embedded_elf_x86_64 = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>, max_stack_allocation_size = 32768 : i64, native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#map3 = affine_map<(d0, d1) -> (d0, d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64]> : !hal.device | |
#encoding = #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding1 = #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding2 = #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
flow.executable private @foo_dispatch_0 { | |
flow.executable.export public @foo_dispatch_0_matmul_DxDxD_f32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @foo_dispatch_0_matmul_DxDxD_f32(%arg0: !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>, %arg1: !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>, %arg2: index, %arg3: index, %arg4: index, %arg5: index, %arg6: !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg5, 3 : index | |
%4 = flow.dispatch.tie_shape %arg0 : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} | |
%5 = flow.dispatch.tie_shape %arg1 : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = flow.dispatch.tie_shape %arg6 : !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
%7 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} -> tensor<?x?xf32, #encoding> | |
%8 = iree_tensor_ext.dispatch.tensor.load %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} -> tensor<?x?xf32, #encoding1> | |
%9 = tensor.empty(%0, %3) : tensor<?x?xf32> | |
%10 = tensor.empty(%0, %3) : tensor<?x?xf32, #encoding2> | |
%11 = linalg.fill ins(%cst : f32) outs(%10 : tensor<?x?xf32, #encoding2>) -> tensor<?x?xf32, #encoding2> | |
%12 = linalg.matmul ins(%7, %8 : tensor<?x?xf32, #encoding>, tensor<?x?xf32, #encoding1>) outs(%11 : tensor<?x?xf32, #encoding2>) -> tensor<?x?xf32, #encoding2> | |
%13 = iree_encoding.unset_encoding %12 : tensor<?x?xf32, #encoding2> -> tensor<?x?xf32>{%0, %3} | |
%14 = linalg.generic {indexing_maps = [#map3, #map3], iterator_types = ["parallel", "parallel"]} ins(%13 : tensor<?x?xf32>) outs(%9 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%15 = arith.addf %in, %in : f32 | |
linalg.yield %15 : f32 | |
} -> tensor<?x?xf32> | |
iree_tensor_ext.dispatch.tensor.store %14, %6, offsets = [0, 0], sizes = [%0, %3], strides = [1, 1] : tensor<?x?xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
return | |
} | |
} | |
} | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = flow.tensor.encode %2 : tensor<?x?xf32>{%0, %1} -> tensor<?x?xf32, #encoding>{%0, %1} | |
%7 = flow.tensor.encode %5 : tensor<?x?xf32>{%3, %4} -> tensor<?x?xf32, #encoding1>{%3, %4} | |
%8 = flow.dispatch @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %3, %4](%6, %7, %0, %1, %3, %4) : (tensor<?x?xf32, #encoding>{%0, %1}, tensor<?x?xf32, #encoding1>{%3, %4}, index, index, index, index) -> tensor<?x?xf32>{%0, %4} | |
%9 = hal.tensor.export %8 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After FuseGlobalsPass (iree-util-fuse-globals) //----- // | |
#executable_target_embedded_elf_x86_64 = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>, max_stack_allocation_size = 32768 : i64, native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#map3 = affine_map<(d0, d1) -> (d0, d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64]> : !hal.device | |
#encoding = #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding1 = #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding2 = #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
flow.executable private @foo_dispatch_0 { | |
flow.executable.export public @foo_dispatch_0_matmul_DxDxD_f32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @foo_dispatch_0_matmul_DxDxD_f32(%arg0: !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>, %arg1: !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>, %arg2: index, %arg3: index, %arg4: index, %arg5: index, %arg6: !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg5, 3 : index | |
%4 = flow.dispatch.tie_shape %arg0 : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} | |
%5 = flow.dispatch.tie_shape %arg1 : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = flow.dispatch.tie_shape %arg6 : !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
%7 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} -> tensor<?x?xf32, #encoding> | |
%8 = iree_tensor_ext.dispatch.tensor.load %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} -> tensor<?x?xf32, #encoding1> | |
%9 = tensor.empty(%0, %3) : tensor<?x?xf32> | |
%10 = tensor.empty(%0, %3) : tensor<?x?xf32, #encoding2> | |
%11 = linalg.fill ins(%cst : f32) outs(%10 : tensor<?x?xf32, #encoding2>) -> tensor<?x?xf32, #encoding2> | |
%12 = linalg.matmul ins(%7, %8 : tensor<?x?xf32, #encoding>, tensor<?x?xf32, #encoding1>) outs(%11 : tensor<?x?xf32, #encoding2>) -> tensor<?x?xf32, #encoding2> | |
%13 = iree_encoding.unset_encoding %12 : tensor<?x?xf32, #encoding2> -> tensor<?x?xf32>{%0, %3} | |
%14 = linalg.generic {indexing_maps = [#map3, #map3], iterator_types = ["parallel", "parallel"]} ins(%13 : tensor<?x?xf32>) outs(%9 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%15 = arith.addf %in, %in : f32 | |
linalg.yield %15 : f32 | |
} -> tensor<?x?xf32> | |
iree_tensor_ext.dispatch.tensor.store %14, %6, offsets = [0, 0], sizes = [%0, %3], strides = [1, 1] : tensor<?x?xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
return | |
} | |
} | |
} | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = flow.tensor.encode %2 : tensor<?x?xf32>{%0, %1} -> tensor<?x?xf32, #encoding>{%0, %1} | |
%7 = flow.tensor.encode %5 : tensor<?x?xf32>{%3, %4} -> tensor<?x?xf32, #encoding1>{%3, %4} | |
%8 = flow.dispatch @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %3, %4](%6, %7, %0, %1, %3, %4) : (tensor<?x?xf32, #encoding>{%0, %1}, tensor<?x?xf32, #encoding1>{%3, %4}, index, index, index, index) -> tensor<?x?xf32>{%0, %4} | |
%9 = hal.tensor.export %8 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After IPOPass (iree-util-ipo) //----- // | |
#executable_target_embedded_elf_x86_64 = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>, max_stack_allocation_size = 32768 : i64, native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#map3 = affine_map<(d0, d1) -> (d0, d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64]> : !hal.device | |
#encoding = #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding1 = #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding2 = #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
flow.executable private @foo_dispatch_0 { | |
flow.executable.export public @foo_dispatch_0_matmul_DxDxD_f32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @foo_dispatch_0_matmul_DxDxD_f32(%arg0: !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>, %arg1: !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>, %arg2: index, %arg3: index, %arg4: index, %arg5: index, %arg6: !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg5, 3 : index | |
%4 = flow.dispatch.tie_shape %arg0 : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} | |
%5 = flow.dispatch.tie_shape %arg1 : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = flow.dispatch.tie_shape %arg6 : !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
%7 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} -> tensor<?x?xf32, #encoding> | |
%8 = iree_tensor_ext.dispatch.tensor.load %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} -> tensor<?x?xf32, #encoding1> | |
%9 = tensor.empty(%0, %3) : tensor<?x?xf32> | |
%10 = tensor.empty(%0, %3) : tensor<?x?xf32, #encoding2> | |
%11 = linalg.fill ins(%cst : f32) outs(%10 : tensor<?x?xf32, #encoding2>) -> tensor<?x?xf32, #encoding2> | |
%12 = linalg.matmul ins(%7, %8 : tensor<?x?xf32, #encoding>, tensor<?x?xf32, #encoding1>) outs(%11 : tensor<?x?xf32, #encoding2>) -> tensor<?x?xf32, #encoding2> | |
%13 = iree_encoding.unset_encoding %12 : tensor<?x?xf32, #encoding2> -> tensor<?x?xf32>{%0, %3} | |
%14 = linalg.generic {indexing_maps = [#map3, #map3], iterator_types = ["parallel", "parallel"]} ins(%13 : tensor<?x?xf32>) outs(%9 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%15 = arith.addf %in, %in : f32 | |
linalg.yield %15 : f32 | |
} -> tensor<?x?xf32> | |
iree_tensor_ext.dispatch.tensor.store %14, %6, offsets = [0, 0], sizes = [%0, %3], strides = [1, 1] : tensor<?x?xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
return | |
} | |
} | |
} | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = flow.tensor.encode %2 : tensor<?x?xf32>{%0, %1} -> tensor<?x?xf32, #encoding>{%0, %1} | |
%7 = flow.tensor.encode %5 : tensor<?x?xf32>{%3, %4} -> tensor<?x?xf32, #encoding1>{%3, %4} | |
%8 = flow.dispatch @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %3, %4](%6, %7, %0, %1, %3, %4) : (tensor<?x?xf32, #encoding>{%0, %1}, tensor<?x?xf32, #encoding1>{%3, %4}, index, index, index, index) -> tensor<?x?xf32>{%0, %4} | |
%9 = hal.tensor.export %8 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After FixedPointIteratorPass (iree-util-fixed-point-iterator) //----- // | |
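// The fixed-point driver has finished iterating: the iree.fixedpoint.iteration
// bookkeeping attribute seen on the module in the previous dump is gone, and
// the module body is otherwise identical.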
#executable_target_embedded_elf_x86_64 = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>, max_stack_allocation_size = 32768 : i64, native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#map3 = affine_map<(d0, d1) -> (d0, d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64]> : !hal.device | |
#encoding = #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding1 = #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding2 = #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
flow.executable private @foo_dispatch_0 { | |
flow.executable.export public @foo_dispatch_0_matmul_DxDxD_f32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @foo_dispatch_0_matmul_DxDxD_f32(%arg0: !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>, %arg1: !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>, %arg2: index, %arg3: index, %arg4: index, %arg5: index, %arg6: !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg5, 3 : index | |
%4 = flow.dispatch.tie_shape %arg0 : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} | |
%5 = flow.dispatch.tie_shape %arg1 : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = flow.dispatch.tie_shape %arg6 : !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
%7 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} -> tensor<?x?xf32, #encoding> | |
%8 = iree_tensor_ext.dispatch.tensor.load %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} -> tensor<?x?xf32, #encoding1> | |
%9 = tensor.empty(%0, %3) : tensor<?x?xf32> | |
%10 = tensor.empty(%0, %3) : tensor<?x?xf32, #encoding2> | |
%11 = linalg.fill ins(%cst : f32) outs(%10 : tensor<?x?xf32, #encoding2>) -> tensor<?x?xf32, #encoding2> | |
%12 = linalg.matmul ins(%7, %8 : tensor<?x?xf32, #encoding>, tensor<?x?xf32, #encoding1>) outs(%11 : tensor<?x?xf32, #encoding2>) -> tensor<?x?xf32, #encoding2> | |
%13 = iree_encoding.unset_encoding %12 : tensor<?x?xf32, #encoding2> -> tensor<?x?xf32>{%0, %3} | |
%14 = linalg.generic {indexing_maps = [#map3, #map3], iterator_types = ["parallel", "parallel"]} ins(%13 : tensor<?x?xf32>) outs(%9 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%15 = arith.addf %in, %in : f32 | |
linalg.yield %15 : f32 | |
} -> tensor<?x?xf32> | |
iree_tensor_ext.dispatch.tensor.store %14, %6, offsets = [0, 0], sizes = [%0, %3], strides = [1, 1] : tensor<?x?xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
return | |
} | |
} | |
} | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = flow.tensor.encode %2 : tensor<?x?xf32>{%0, %1} -> tensor<?x?xf32, #encoding>{%0, %1} | |
%7 = flow.tensor.encode %5 : tensor<?x?xf32>{%3, %4} -> tensor<?x?xf32, #encoding1>{%3, %4} | |
%8 = flow.dispatch @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %3, %4](%6, %7, %0, %1, %3, %4) : (tensor<?x?xf32, #encoding>{%0, %1}, tensor<?x?xf32, #encoding1>{%3, %4}, index, index, index, index) -> tensor<?x?xf32>{%0, %4} | |
%9 = hal.tensor.export %8 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After SymbolDCE (symbol-dce) //----- // | |
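// Both symbols (@__device_0 and @foo_dispatch_0) are still referenced, so
// symbol DCE removes nothing; this dump is identical to the previous one.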
#executable_target_embedded_elf_x86_64 = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>, max_stack_allocation_size = 32768 : i64, native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#map3 = affine_map<(d0, d1) -> (d0, d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64]> : !hal.device | |
#encoding = #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding1 = #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding2 = #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
flow.executable private @foo_dispatch_0 { | |
flow.executable.export public @foo_dispatch_0_matmul_DxDxD_f32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @foo_dispatch_0_matmul_DxDxD_f32(%arg0: !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>, %arg1: !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>, %arg2: index, %arg3: index, %arg4: index, %arg5: index, %arg6: !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg5, 3 : index | |
%4 = flow.dispatch.tie_shape %arg0 : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} | |
%5 = flow.dispatch.tie_shape %arg1 : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = flow.dispatch.tie_shape %arg6 : !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
%7 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} -> tensor<?x?xf32, #encoding> | |
%8 = iree_tensor_ext.dispatch.tensor.load %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} -> tensor<?x?xf32, #encoding1> | |
%9 = tensor.empty(%0, %3) : tensor<?x?xf32> | |
%10 = tensor.empty(%0, %3) : tensor<?x?xf32, #encoding2> | |
%11 = linalg.fill ins(%cst : f32) outs(%10 : tensor<?x?xf32, #encoding2>) -> tensor<?x?xf32, #encoding2> | |
%12 = linalg.matmul ins(%7, %8 : tensor<?x?xf32, #encoding>, tensor<?x?xf32, #encoding1>) outs(%11 : tensor<?x?xf32, #encoding2>) -> tensor<?x?xf32, #encoding2> | |
%13 = iree_encoding.unset_encoding %12 : tensor<?x?xf32, #encoding2> -> tensor<?x?xf32>{%0, %3} | |
%14 = linalg.generic {indexing_maps = [#map3, #map3], iterator_types = ["parallel", "parallel"]} ins(%13 : tensor<?x?xf32>) outs(%9 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%15 = arith.addf %in, %in : f32 | |
linalg.yield %15 : f32 | |
} -> tensor<?x?xf32> | |
iree_tensor_ext.dispatch.tensor.store %14, %6, offsets = [0, 0], sizes = [%0, %3], strides = [1, 1] : tensor<?x?xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
return | |
} | |
} | |
} | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = flow.tensor.encode %2 : tensor<?x?xf32>{%0, %1} -> tensor<?x?xf32, #encoding>{%0, %1} | |
%7 = flow.tensor.encode %5 : tensor<?x?xf32>{%3, %4} -> tensor<?x?xf32, #encoding1>{%3, %4} | |
%8 = flow.dispatch @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %3, %4](%6, %7, %0, %1, %3, %4) : (tensor<?x?xf32, #encoding>{%0, %1}, tensor<?x?xf32, #encoding1>{%3, %4}, index, index, index, index) -> tensor<?x?xf32>{%0, %4} | |
%9 = hal.tensor.export %8 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After VerifyInputPass (iree-stream-verify-input) //----- // | |
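// Verification only; the IR is not modified by this pass.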
#executable_target_embedded_elf_x86_64 = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>, max_stack_allocation_size = 32768 : i64, native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#map3 = affine_map<(d0, d1) -> (d0, d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64]> : !hal.device | |
#encoding = #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding1 = #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding2 = #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
flow.executable private @foo_dispatch_0 { | |
flow.executable.export public @foo_dispatch_0_matmul_DxDxD_f32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @foo_dispatch_0_matmul_DxDxD_f32(%arg0: !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>, %arg1: !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>, %arg2: index, %arg3: index, %arg4: index, %arg5: index, %arg6: !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg5, 3 : index | |
%4 = flow.dispatch.tie_shape %arg0 : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} | |
%5 = flow.dispatch.tie_shape %arg1 : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = flow.dispatch.tie_shape %arg6 : !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
%7 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} -> tensor<?x?xf32, #encoding> | |
%8 = iree_tensor_ext.dispatch.tensor.load %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} -> tensor<?x?xf32, #encoding1> | |
%9 = tensor.empty(%0, %3) : tensor<?x?xf32> | |
%10 = tensor.empty(%0, %3) : tensor<?x?xf32, #encoding2> | |
%11 = linalg.fill ins(%cst : f32) outs(%10 : tensor<?x?xf32, #encoding2>) -> tensor<?x?xf32, #encoding2> | |
%12 = linalg.matmul ins(%7, %8 : tensor<?x?xf32, #encoding>, tensor<?x?xf32, #encoding1>) outs(%11 : tensor<?x?xf32, #encoding2>) -> tensor<?x?xf32, #encoding2> | |
%13 = iree_encoding.unset_encoding %12 : tensor<?x?xf32, #encoding2> -> tensor<?x?xf32>{%0, %3} | |
%14 = linalg.generic {indexing_maps = [#map3, #map3], iterator_types = ["parallel", "parallel"]} ins(%13 : tensor<?x?xf32>) outs(%9 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%15 = arith.addf %in, %in : f32 | |
linalg.yield %15 : f32 | |
} -> tensor<?x?xf32> | |
iree_tensor_ext.dispatch.tensor.store %14, %6, offsets = [0, 0], sizes = [%0, %3], strides = [1, 1] : tensor<?x?xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
return | |
} | |
} | |
} | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = flow.tensor.encode %2 : tensor<?x?xf32>{%0, %1} -> tensor<?x?xf32, #encoding>{%0, %1} | |
%7 = flow.tensor.encode %5 : tensor<?x?xf32>{%3, %4} -> tensor<?x?xf32, #encoding1>{%3, %4} | |
%8 = flow.dispatch @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %3, %4](%6, %7, %0, %1, %3, %4) : (tensor<?x?xf32, #encoding>{%0, %1}, tensor<?x?xf32, #encoding1>{%3, %4}, index, index, index, index) -> tensor<?x?xf32>{%0, %4} | |
%9 = hal.tensor.export %8 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
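// The canonicalizer runs per function, so only @foo is printed here and the
// encoding attributes appear inline rather than as #encoding aliases. The
// function body itself is unchanged.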
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = flow.tensor.encode %2 : tensor<?x?xf32>{%0, %1} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%0, %1} | |
%7 = flow.tensor.encode %5 : tensor<?x?xf32>{%3, %4} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%3, %4} | |
%8 = flow.dispatch @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %3, %4](%6, %7, %0, %1, %3, %4) : (tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%0, %1}, tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%3, %4}, index, index, index, index) -> tensor<?x?xf32>{%0, %4} | |
%9 = hal.tensor.export %8 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
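// No common subexpressions to eliminate; @foo is unchanged.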
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = flow.tensor.encode %2 : tensor<?x?xf32>{%0, %1} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%0, %1} | |
%7 = flow.tensor.encode %5 : tensor<?x?xf32>{%3, %4} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%3, %4} | |
%8 = flow.dispatch @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %3, %4](%6, %7, %0, %1, %3, %4) : (tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%0, %1}, tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%3, %4}, index, index, index, index) -> tensor<?x?xf32>{%0, %4} | |
%9 = hal.tensor.export %8 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
// -----// IR Dump After OptimizeIntArithmeticPass (iree-util-optimize-int-arithmetic) //----- // | |
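// This and the following util cleanup dumps (SimplifyGlobalAccesses,
// ApplyPatterns) leave @foo untouched.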
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = flow.tensor.encode %2 : tensor<?x?xf32>{%0, %1} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%0, %1} | |
%7 = flow.tensor.encode %5 : tensor<?x?xf32>{%3, %4} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%3, %4} | |
%8 = flow.dispatch @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %3, %4](%6, %7, %0, %1, %3, %4) : (tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%0, %1}, tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%3, %4}, index, index, index, index) -> tensor<?x?xf32>{%0, %4} | |
%9 = hal.tensor.export %8 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
// -----// IR Dump After SimplifyGlobalAccessesPass (iree-util-simplify-global-accesses) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = flow.tensor.encode %2 : tensor<?x?xf32>{%0, %1} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%0, %1} | |
%7 = flow.tensor.encode %5 : tensor<?x?xf32>{%3, %4} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%3, %4} | |
%8 = flow.dispatch @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %3, %4](%6, %7, %0, %1, %3, %4) : (tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%0, %1}, tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%3, %4}, index, index, index, index) -> tensor<?x?xf32>{%0, %4} | |
%9 = hal.tensor.export %8 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
// -----// IR Dump After ApplyPatternsPass (iree-util-apply-patterns) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = flow.tensor.encode %2 : tensor<?x?xf32>{%0, %1} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%0, %1} | |
%7 = flow.tensor.encode %5 : tensor<?x?xf32>{%3, %4} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%3, %4} | |
%8 = flow.dispatch @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %3, %4](%6, %7, %0, %1, %3, %4) : (tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%0, %1}, tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%3, %4}, index, index, index, index) -> tensor<?x?xf32>{%0, %4} | |
%9 = hal.tensor.export %8 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
// -----// IR Dump After FoldGlobalsPass (iree-util-fold-globals) //----- // | |
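// Back to a module-scope dump. The only global is the immutable @__device_0
// device handle, so there is nothing to fold and the module is unchanged.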
#executable_target_embedded_elf_x86_64 = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>, max_stack_allocation_size = 32768 : i64, native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#map3 = affine_map<(d0, d1) -> (d0, d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64]> : !hal.device | |
#encoding = #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding1 = #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding2 = #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
flow.executable private @foo_dispatch_0 { | |
flow.executable.export public @foo_dispatch_0_matmul_DxDxD_f32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @foo_dispatch_0_matmul_DxDxD_f32(%arg0: !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>, %arg1: !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>, %arg2: index, %arg3: index, %arg4: index, %arg5: index, %arg6: !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg5, 3 : index | |
%4 = flow.dispatch.tie_shape %arg0 : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} | |
%5 = flow.dispatch.tie_shape %arg1 : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = flow.dispatch.tie_shape %arg6 : !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
%7 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} -> tensor<?x?xf32, #encoding> | |
%8 = iree_tensor_ext.dispatch.tensor.load %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} -> tensor<?x?xf32, #encoding1> | |
%9 = tensor.empty(%0, %3) : tensor<?x?xf32> | |
%10 = tensor.empty(%0, %3) : tensor<?x?xf32, #encoding2> | |
%11 = linalg.fill ins(%cst : f32) outs(%10 : tensor<?x?xf32, #encoding2>) -> tensor<?x?xf32, #encoding2> | |
%12 = linalg.matmul ins(%7, %8 : tensor<?x?xf32, #encoding>, tensor<?x?xf32, #encoding1>) outs(%11 : tensor<?x?xf32, #encoding2>) -> tensor<?x?xf32, #encoding2> | |
%13 = iree_encoding.unset_encoding %12 : tensor<?x?xf32, #encoding2> -> tensor<?x?xf32>{%0, %3} | |
%14 = linalg.generic {indexing_maps = [#map3, #map3], iterator_types = ["parallel", "parallel"]} ins(%13 : tensor<?x?xf32>) outs(%9 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%15 = arith.addf %in, %in : f32 | |
linalg.yield %15 : f32 | |
} -> tensor<?x?xf32> | |
iree_tensor_ext.dispatch.tensor.store %14, %6, offsets = [0, 0], sizes = [%0, %3], strides = [1, 1] : tensor<?x?xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
return | |
} | |
} | |
} | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = flow.tensor.encode %2 : tensor<?x?xf32>{%0, %1} -> tensor<?x?xf32, #encoding>{%0, %1} | |
%7 = flow.tensor.encode %5 : tensor<?x?xf32>{%3, %4} -> tensor<?x?xf32, #encoding1>{%3, %4} | |
%8 = flow.dispatch @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %3, %4](%6, %7, %0, %1, %3, %4) : (tensor<?x?xf32, #encoding>{%0, %1}, tensor<?x?xf32, #encoding1>{%3, %4}, index, index, index, index) -> tensor<?x?xf32>{%0, %4} | |
%9 = hal.tensor.export %8 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After FuseGlobalsPass (iree-util-fuse-globals) //----- // | |
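// With a single global there are no fusion candidates; unchanged.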
#executable_target_embedded_elf_x86_64 = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>, max_stack_allocation_size = 32768 : i64, native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#map3 = affine_map<(d0, d1) -> (d0, d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64]> : !hal.device | |
#encoding = #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding1 = #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding2 = #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
flow.executable private @foo_dispatch_0 { | |
flow.executable.export public @foo_dispatch_0_matmul_DxDxD_f32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @foo_dispatch_0_matmul_DxDxD_f32(%arg0: !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>, %arg1: !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>, %arg2: index, %arg3: index, %arg4: index, %arg5: index, %arg6: !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg5, 3 : index | |
%4 = flow.dispatch.tie_shape %arg0 : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} | |
%5 = flow.dispatch.tie_shape %arg1 : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = flow.dispatch.tie_shape %arg6 : !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
%7 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} -> tensor<?x?xf32, #encoding> | |
%8 = iree_tensor_ext.dispatch.tensor.load %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} -> tensor<?x?xf32, #encoding1> | |
%9 = tensor.empty(%0, %3) : tensor<?x?xf32> | |
%10 = tensor.empty(%0, %3) : tensor<?x?xf32, #encoding2> | |
%11 = linalg.fill ins(%cst : f32) outs(%10 : tensor<?x?xf32, #encoding2>) -> tensor<?x?xf32, #encoding2> | |
%12 = linalg.matmul ins(%7, %8 : tensor<?x?xf32, #encoding>, tensor<?x?xf32, #encoding1>) outs(%11 : tensor<?x?xf32, #encoding2>) -> tensor<?x?xf32, #encoding2> | |
%13 = iree_encoding.unset_encoding %12 : tensor<?x?xf32, #encoding2> -> tensor<?x?xf32>{%0, %3} | |
%14 = linalg.generic {indexing_maps = [#map3, #map3], iterator_types = ["parallel", "parallel"]} ins(%13 : tensor<?x?xf32>) outs(%9 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%15 = arith.addf %in, %in : f32 | |
linalg.yield %15 : f32 | |
} -> tensor<?x?xf32> | |
iree_tensor_ext.dispatch.tensor.store %14, %6, offsets = [0, 0], sizes = [%0, %3], strides = [1, 1] : tensor<?x?xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
return | |
} | |
} | |
} | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = flow.tensor.encode %2 : tensor<?x?xf32>{%0, %1} -> tensor<?x?xf32, #encoding>{%0, %1} | |
%7 = flow.tensor.encode %5 : tensor<?x?xf32>{%3, %4} -> tensor<?x?xf32, #encoding1>{%3, %4} | |
%8 = flow.dispatch @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %3, %4](%6, %7, %0, %1, %3, %4) : (tensor<?x?xf32, #encoding>{%0, %1}, tensor<?x?xf32, #encoding1>{%3, %4}, index, index, index, index) -> tensor<?x?xf32>{%0, %4} | |
%9 = hal.tensor.export %8 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After IPOPass (iree-util-ipo) //----- // | |
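// @foo is the only util.func, is public, and has no internal callers, so
// there is nothing to propagate interprocedurally; unchanged.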
#executable_target_embedded_elf_x86_64 = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>, max_stack_allocation_size = 32768 : i64, native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#map3 = affine_map<(d0, d1) -> (d0, d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64]> : !hal.device | |
#encoding = #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding1 = #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding2 = #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
flow.executable private @foo_dispatch_0 { | |
flow.executable.export public @foo_dispatch_0_matmul_DxDxD_f32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @foo_dispatch_0_matmul_DxDxD_f32(%arg0: !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>, %arg1: !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>, %arg2: index, %arg3: index, %arg4: index, %arg5: index, %arg6: !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg5, 3 : index | |
%4 = flow.dispatch.tie_shape %arg0 : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} | |
%5 = flow.dispatch.tie_shape %arg1 : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = flow.dispatch.tie_shape %arg6 : !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
%7 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} -> tensor<?x?xf32, #encoding> | |
%8 = iree_tensor_ext.dispatch.tensor.load %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} -> tensor<?x?xf32, #encoding1> | |
%9 = tensor.empty(%0, %3) : tensor<?x?xf32> | |
%10 = tensor.empty(%0, %3) : tensor<?x?xf32, #encoding2> | |
%11 = linalg.fill ins(%cst : f32) outs(%10 : tensor<?x?xf32, #encoding2>) -> tensor<?x?xf32, #encoding2> | |
%12 = linalg.matmul ins(%7, %8 : tensor<?x?xf32, #encoding>, tensor<?x?xf32, #encoding1>) outs(%11 : tensor<?x?xf32, #encoding2>) -> tensor<?x?xf32, #encoding2> | |
%13 = iree_encoding.unset_encoding %12 : tensor<?x?xf32, #encoding2> -> tensor<?x?xf32>{%0, %3} | |
%14 = linalg.generic {indexing_maps = [#map3, #map3], iterator_types = ["parallel", "parallel"]} ins(%13 : tensor<?x?xf32>) outs(%9 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%15 = arith.addf %in, %in : f32 | |
linalg.yield %15 : f32 | |
} -> tensor<?x?xf32> | |
iree_tensor_ext.dispatch.tensor.store %14, %6, offsets = [0, 0], sizes = [%0, %3], strides = [1, 1] : tensor<?x?xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
return | |
} | |
} | |
} | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = flow.tensor.encode %2 : tensor<?x?xf32>{%0, %1} -> tensor<?x?xf32, #encoding>{%0, %1} | |
%7 = flow.tensor.encode %5 : tensor<?x?xf32>{%3, %4} -> tensor<?x?xf32, #encoding1>{%3, %4} | |
%8 = flow.dispatch @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %3, %4](%6, %7, %0, %1, %3, %4) : (tensor<?x?xf32, #encoding>{%0, %1}, tensor<?x?xf32, #encoding1>{%3, %4}, index, index, index, index) -> tensor<?x?xf32>{%0, %4} | |
%9 = hal.tensor.export %8 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After CloneToConsumersPass (iree-stream-clone-to-consumers) //----- // | |
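// The module is unchanged by this pass; the dump matches the previous one.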
#executable_target_embedded_elf_x86_64 = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>, max_stack_allocation_size = 32768 : i64, native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#map3 = affine_map<(d0, d1) -> (d0, d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64]> : !hal.device | |
#encoding = #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding1 = #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding2 = #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
flow.executable private @foo_dispatch_0 { | |
flow.executable.export public @foo_dispatch_0_matmul_DxDxD_f32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @foo_dispatch_0_matmul_DxDxD_f32(%arg0: !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>, %arg1: !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>, %arg2: index, %arg3: index, %arg4: index, %arg5: index, %arg6: !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg5, 3 : index | |
%4 = flow.dispatch.tie_shape %arg0 : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} | |
%5 = flow.dispatch.tie_shape %arg1 : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = flow.dispatch.tie_shape %arg6 : !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
%7 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} -> tensor<?x?xf32, #encoding> | |
%8 = iree_tensor_ext.dispatch.tensor.load %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} -> tensor<?x?xf32, #encoding1> | |
%9 = tensor.empty(%0, %3) : tensor<?x?xf32> | |
%10 = tensor.empty(%0, %3) : tensor<?x?xf32, #encoding2> | |
%11 = linalg.fill ins(%cst : f32) outs(%10 : tensor<?x?xf32, #encoding2>) -> tensor<?x?xf32, #encoding2> | |
%12 = linalg.matmul ins(%7, %8 : tensor<?x?xf32, #encoding>, tensor<?x?xf32, #encoding1>) outs(%11 : tensor<?x?xf32, #encoding2>) -> tensor<?x?xf32, #encoding2> | |
%13 = iree_encoding.unset_encoding %12 : tensor<?x?xf32, #encoding2> -> tensor<?x?xf32>{%0, %3} | |
%14 = linalg.generic {indexing_maps = [#map3, #map3], iterator_types = ["parallel", "parallel"]} ins(%13 : tensor<?x?xf32>) outs(%9 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%15 = arith.addf %in, %in : f32 | |
linalg.yield %15 : f32 | |
} -> tensor<?x?xf32> | |
iree_tensor_ext.dispatch.tensor.store %14, %6, offsets = [0, 0], sizes = [%0, %3], strides = [1, 1] : tensor<?x?xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
return | |
} | |
} | |
} | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} | |
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%5 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?x?xf32>{%3, %4} | |
%6 = flow.tensor.encode %2 : tensor<?x?xf32>{%0, %1} -> tensor<?x?xf32, #encoding>{%0, %1} | |
%7 = flow.tensor.encode %5 : tensor<?x?xf32>{%3, %4} -> tensor<?x?xf32, #encoding1>{%3, %4} | |
%8 = flow.dispatch @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %3, %4](%6, %7, %0, %1, %3, %4) : (tensor<?x?xf32, #encoding>{%0, %1}, tensor<?x?xf32, #encoding1>{%3, %4}, index, index, index, index) -> tensor<?x?xf32>{%0, %4} | |
%9 = hal.tensor.export %8 "output0" : tensor<?x?xf32>{%0, %4} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After ConvertToStreamPass (iree-stream-conversion) //----- // | |
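// First dump in the stream dialect. flow.executable/flow.dispatch become
// stream.executable/stream.tensor.dispatch, the dispatch tensor arguments are
// now !stream.binding values accessed via stream.binding.subspan, and the ABI
// boundary is expanded: hal.tensor.import lowers to hal.buffer_view.assert +
// stream.tensor.import + stream.async.transfer, flow.tensor.encode becomes
// stream.tensor.encode with explicit stream.tensor.sizeof resource sizes, and
// hal.tensor.export goes through a transfer to an external resource followed
// by stream.tensor.export.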
#executable_target_embedded_elf_x86_64 = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>, max_stack_allocation_size = 32768 : i64, native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#map3 = affine_map<(d0, d1) -> (d0, d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64]> : !hal.device | |
#encoding = #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding1 = #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding2 = #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
stream.executable private @foo_dispatch_0 { | |
stream.executable.export public @foo_dispatch_0_matmul_DxDxD_f32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @foo_dispatch_0_matmul_DxDxD_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: index, %arg5: index, %arg6: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg5, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} | |
%5 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = stream.binding.subspan %arg6[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
%7 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} -> tensor<?x?xf32, #encoding> | |
%8 = iree_tensor_ext.dispatch.tensor.load %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} -> tensor<?x?xf32, #encoding1> | |
%9 = tensor.empty(%0, %3) : tensor<?x?xf32> | |
%10 = tensor.empty(%0, %3) : tensor<?x?xf32, #encoding2> | |
%11 = linalg.fill ins(%cst : f32) outs(%10 : tensor<?x?xf32, #encoding2>) -> tensor<?x?xf32, #encoding2> | |
%12 = linalg.matmul ins(%7, %8 : tensor<?x?xf32, #encoding>, tensor<?x?xf32, #encoding1>) outs(%11 : tensor<?x?xf32, #encoding2>) -> tensor<?x?xf32, #encoding2> | |
%13 = iree_encoding.unset_encoding %12 : tensor<?x?xf32, #encoding2> -> tensor<?x?xf32>{%0, %3} | |
%14 = linalg.generic {indexing_maps = [#map3, #map3], iterator_types = ["parallel", "parallel"]} ins(%13 : tensor<?x?xf32>) outs(%9 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%15 = arith.addf %in, %in : f32 | |
linalg.yield %15 : f32 | |
} -> tensor<?x?xf32> | |
iree_tensor_ext.dispatch.tensor.store %14, %6, offsets = [0, 0], sizes = [%0, %3], strides = [1, 1] : tensor<?x?xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
return | |
} | |
} | |
} | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?xf32>{%0, %1} : index | |
%3 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%2} | |
%4 = stream.async.transfer %3 : !stream.resource<external>{%2} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%2} | |
%5 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%element_type_f32_0 = hal.element_type<f32> : i32 | |
%dense_row_major_1 = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%5, %6]) type(%element_type_f32_0) encoding(%dense_row_major_1) | |
%7 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?xf32>{%5, %6} : index | |
%8 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%5, %6} in !stream.resource<external>{%7} | |
%9 = stream.async.transfer %8 : !stream.resource<external>{%7} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%7} | |
%10 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?xf32, #encoding>{%0, %1} : index | |
%11 = stream.tensor.encode on(#hal.device.affinity<@__device_0>) %4 : tensor<?x?xf32>{%0, %1} in !stream.resource<*>{%2} -> tensor<?x?xf32, #encoding>{%0, %1} in !stream.resource<*>{%10} | |
%12 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?xf32, #encoding1>{%5, %6} : index | |
%13 = stream.tensor.encode on(#hal.device.affinity<@__device_0>) %9 : tensor<?x?xf32>{%5, %6} in !stream.resource<*>{%7} -> tensor<?x?xf32, #encoding1>{%5, %6} in !stream.resource<*>{%12} | |
%14 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?xf32>{%0, %6} : index | |
%15 = stream.tensor.dispatch on(#hal.device.affinity<@__device_0>) @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %5, %6](%11, %13, %0, %1, %5, %6) : (tensor<?x?xf32, #encoding>{%0, %1} in !stream.resource<*>{%10}, tensor<?x?xf32, #encoding1>{%5, %6} in !stream.resource<*>{%12}, index, index, index, index) -> tensor<?x?xf32>{%0, %6} in !stream.resource<*>{%14} | |
%16 = stream.async.transfer %15 : !stream.resource<*>{%14} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%14} | |
%17 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %16 : tensor<?x?xf32>{%0, %6} in !stream.resource<external>{%14} -> !hal.buffer_view | |
util.return %17 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After VerifyLoweringToTensorsPass (iree-stream-verify-lowering-to-tensors) //----- // | |
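// Verification-only pass: the module below is unchanged from the previous dump.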
#executable_target_embedded_elf_x86_64 = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>, max_stack_allocation_size = 32768 : i64, native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#map3 = affine_map<(d0, d1) -> (d0, d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64]> : !hal.device | |
#encoding = #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding1 = #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding2 = #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
stream.executable private @foo_dispatch_0 { | |
stream.executable.export public @foo_dispatch_0_matmul_DxDxD_f32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @foo_dispatch_0_matmul_DxDxD_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: index, %arg5: index, %arg6: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg5, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} | |
%5 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = stream.binding.subspan %arg6[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
%7 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} -> tensor<?x?xf32, #encoding> | |
%8 = iree_tensor_ext.dispatch.tensor.load %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} -> tensor<?x?xf32, #encoding1> | |
%9 = tensor.empty(%0, %3) : tensor<?x?xf32> | |
%10 = tensor.empty(%0, %3) : tensor<?x?xf32, #encoding2> | |
%11 = linalg.fill ins(%cst : f32) outs(%10 : tensor<?x?xf32, #encoding2>) -> tensor<?x?xf32, #encoding2> | |
%12 = linalg.matmul ins(%7, %8 : tensor<?x?xf32, #encoding>, tensor<?x?xf32, #encoding1>) outs(%11 : tensor<?x?xf32, #encoding2>) -> tensor<?x?xf32, #encoding2> | |
%13 = iree_encoding.unset_encoding %12 : tensor<?x?xf32, #encoding2> -> tensor<?x?xf32>{%0, %3} | |
%14 = linalg.generic {indexing_maps = [#map3, #map3], iterator_types = ["parallel", "parallel"]} ins(%13 : tensor<?x?xf32>) outs(%9 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%15 = arith.addf %in, %in : f32 | |
linalg.yield %15 : f32 | |
} -> tensor<?x?xf32> | |
iree_tensor_ext.dispatch.tensor.store %14, %6, offsets = [0, 0], sizes = [%0, %3], strides = [1, 1] : tensor<?x?xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
return | |
} | |
} | |
} | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?xf32>{%0, %1} : index | |
%3 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%2} | |
%4 = stream.async.transfer %3 : !stream.resource<external>{%2} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%2} | |
%5 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%element_type_f32_0 = hal.element_type<f32> : i32 | |
%dense_row_major_1 = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%5, %6]) type(%element_type_f32_0) encoding(%dense_row_major_1) | |
%7 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?xf32>{%5, %6} : index | |
%8 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%5, %6} in !stream.resource<external>{%7} | |
%9 = stream.async.transfer %8 : !stream.resource<external>{%7} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%7} | |
%10 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?xf32, #encoding>{%0, %1} : index | |
%11 = stream.tensor.encode on(#hal.device.affinity<@__device_0>) %4 : tensor<?x?xf32>{%0, %1} in !stream.resource<*>{%2} -> tensor<?x?xf32, #encoding>{%0, %1} in !stream.resource<*>{%10} | |
%12 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?xf32, #encoding1>{%5, %6} : index | |
%13 = stream.tensor.encode on(#hal.device.affinity<@__device_0>) %9 : tensor<?x?xf32>{%5, %6} in !stream.resource<*>{%7} -> tensor<?x?xf32, #encoding1>{%5, %6} in !stream.resource<*>{%12} | |
%14 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?xf32>{%0, %6} : index | |
%15 = stream.tensor.dispatch on(#hal.device.affinity<@__device_0>) @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %5, %6](%11, %13, %0, %1, %5, %6) : (tensor<?x?xf32, #encoding>{%0, %1} in !stream.resource<*>{%10}, tensor<?x?xf32, #encoding1>{%5, %6} in !stream.resource<*>{%12}, index, index, index, index) -> tensor<?x?xf32>{%0, %6} in !stream.resource<*>{%14} | |
%16 = stream.async.transfer %15 : !stream.resource<*>{%14} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%14} | |
%17 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %16 : tensor<?x?xf32>{%0, %6} in !stream.resource<external>{%14} -> !hal.buffer_view | |
util.return %17 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
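// Per-function dump: only the dispatch function is printed, so the module-scope #map/#encoding aliases appear expanded inline in the types. The canonicalizer folded nothing here.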
func.func @foo_dispatch_0_matmul_DxDxD_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: index, %arg5: index, %arg6: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg5, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>>{%0, %1} | |
%5 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>>{%2, %3} | |
%6 = stream.binding.subspan %arg6[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
%7 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>>{%0, %1} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%8 = iree_tensor_ext.dispatch.tensor.load %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>>{%2, %3} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%9 = tensor.empty(%0, %3) : tensor<?x?xf32> | |
%10 = tensor.empty(%0, %3) : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%11 = linalg.fill ins(%cst : f32) outs(%10 : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>) -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%12 = linalg.matmul ins(%7, %8 : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>, tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>) outs(%11 : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>) -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%13 = iree_encoding.unset_encoding %12 : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> -> tensor<?x?xf32>{%0, %3} | |
%14 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%13 : tensor<?x?xf32>) outs(%9 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%15 = arith.addf %in, %in : f32 | |
linalg.yield %15 : f32 | |
} -> tensor<?x?xf32> | |
iree_tensor_ext.dispatch.tensor.store %14, %6, offsets = [0, 0], sizes = [%0, %3], strides = [1, 1] : tensor<?x?xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
return | |
} | |
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
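// Per-function dump of the public entry point @foo; apart from the inlined attribute aliases, the IR is unchanged.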
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?xf32>{%0, %1} : index | |
%3 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%2} | |
%4 = stream.async.transfer %3 : !stream.resource<external>{%2} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%2} | |
%5 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%element_type_f32_0 = hal.element_type<f32> : i32 | |
%dense_row_major_1 = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%5, %6]) type(%element_type_f32_0) encoding(%dense_row_major_1) | |
%7 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?xf32>{%5, %6} : index | |
%8 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%5, %6} in !stream.resource<external>{%7} | |
%9 = stream.async.transfer %8 : !stream.resource<external>{%7} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%7} | |
%10 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%0, %1} : index | |
%11 = stream.tensor.encode on(#hal.device.affinity<@__device_0>) %4 : tensor<?x?xf32>{%0, %1} in !stream.resource<*>{%2} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%0, %1} in !stream.resource<*>{%10} | |
%12 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%5, %6} : index | |
%13 = stream.tensor.encode on(#hal.device.affinity<@__device_0>) %9 : tensor<?x?xf32>{%5, %6} in !stream.resource<*>{%7} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%5, %6} in !stream.resource<*>{%12} | |
%14 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?xf32>{%0, %6} : index | |
%15 = stream.tensor.dispatch on(#hal.device.affinity<@__device_0>) @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %5, %6](%11, %13, %0, %1, %5, %6) : (tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%0, %1} in !stream.resource<*>{%10}, tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%5, %6} in !stream.resource<*>{%12}, index, index, index, index) -> tensor<?x?xf32>{%0, %6} in !stream.resource<*>{%14} | |
%16 = stream.async.transfer %15 : !stream.resource<*>{%14} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%14} | |
%17 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %16 : tensor<?x?xf32>{%0, %6} in !stream.resource<external>{%14} -> !hal.buffer_view | |
util.return %17 : !hal.buffer_view | |
} | |
// -----// IR Dump After Inliner (inline) //----- // | |
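// No calls to inline; the full module is reprinted (with attribute aliases restored) unchanged.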
#executable_target_embedded_elf_x86_64 = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>, max_stack_allocation_size = 32768 : i64, native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#map3 = affine_map<(d0, d1) -> (d0, d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64]> : !hal.device | |
#encoding = #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding1 = #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding2 = #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
stream.executable private @foo_dispatch_0 { | |
stream.executable.export public @foo_dispatch_0_matmul_DxDxD_f32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @foo_dispatch_0_matmul_DxDxD_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: index, %arg5: index, %arg6: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg5, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} | |
%5 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = stream.binding.subspan %arg6[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
%7 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} -> tensor<?x?xf32, #encoding> | |
%8 = iree_tensor_ext.dispatch.tensor.load %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} -> tensor<?x?xf32, #encoding1> | |
%9 = tensor.empty(%0, %3) : tensor<?x?xf32> | |
%10 = tensor.empty(%0, %3) : tensor<?x?xf32, #encoding2> | |
%11 = linalg.fill ins(%cst : f32) outs(%10 : tensor<?x?xf32, #encoding2>) -> tensor<?x?xf32, #encoding2> | |
%12 = linalg.matmul ins(%7, %8 : tensor<?x?xf32, #encoding>, tensor<?x?xf32, #encoding1>) outs(%11 : tensor<?x?xf32, #encoding2>) -> tensor<?x?xf32, #encoding2> | |
%13 = iree_encoding.unset_encoding %12 : tensor<?x?xf32, #encoding2> -> tensor<?x?xf32>{%0, %3} | |
%14 = linalg.generic {indexing_maps = [#map3, #map3], iterator_types = ["parallel", "parallel"]} ins(%13 : tensor<?x?xf32>) outs(%9 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%15 = arith.addf %in, %in : f32 | |
linalg.yield %15 : f32 | |
} -> tensor<?x?xf32> | |
iree_tensor_ext.dispatch.tensor.store %14, %6, offsets = [0, 0], sizes = [%0, %3], strides = [1, 1] : tensor<?x?xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
return | |
} | |
} | |
} | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?xf32>{%0, %1} : index | |
%3 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%2} | |
%4 = stream.async.transfer %3 : !stream.resource<external>{%2} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%2} | |
%5 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%element_type_f32_0 = hal.element_type<f32> : i32 | |
%dense_row_major_1 = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%5, %6]) type(%element_type_f32_0) encoding(%dense_row_major_1) | |
%7 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?xf32>{%5, %6} : index | |
%8 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%5, %6} in !stream.resource<external>{%7} | |
%9 = stream.async.transfer %8 : !stream.resource<external>{%7} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%7} | |
%10 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?xf32, #encoding>{%0, %1} : index | |
%11 = stream.tensor.encode on(#hal.device.affinity<@__device_0>) %4 : tensor<?x?xf32>{%0, %1} in !stream.resource<*>{%2} -> tensor<?x?xf32, #encoding>{%0, %1} in !stream.resource<*>{%10} | |
%12 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?xf32, #encoding1>{%5, %6} : index | |
%13 = stream.tensor.encode on(#hal.device.affinity<@__device_0>) %9 : tensor<?x?xf32>{%5, %6} in !stream.resource<*>{%7} -> tensor<?x?xf32, #encoding1>{%5, %6} in !stream.resource<*>{%12} | |
%14 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?xf32>{%0, %6} : index | |
%15 = stream.tensor.dispatch on(#hal.device.affinity<@__device_0>) @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %5, %6](%11, %13, %0, %1, %5, %6) : (tensor<?x?xf32, #encoding>{%0, %1} in !stream.resource<*>{%10}, tensor<?x?xf32, #encoding1>{%5, %6} in !stream.resource<*>{%12}, index, index, index, index) -> tensor<?x?xf32>{%0, %6} in !stream.resource<*>{%14} | |
%16 = stream.async.transfer %15 : !stream.resource<*>{%14} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%14} | |
%17 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %16 : tensor<?x?xf32>{%0, %6} in !stream.resource<external>{%14} -> !hal.buffer_view | |
util.return %17 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
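// @foo is already canonical; no ops were folded.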
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?xf32>{%0, %1} : index | |
%3 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%2} | |
%4 = stream.async.transfer %3 : !stream.resource<external>{%2} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%2} | |
%5 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
%element_type_f32_0 = hal.element_type<f32> : i32 | |
%dense_row_major_1 = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%5, %6]) type(%element_type_f32_0) encoding(%dense_row_major_1) | |
%7 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?xf32>{%5, %6} : index | |
%8 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%5, %6} in !stream.resource<external>{%7} | |
%9 = stream.async.transfer %8 : !stream.resource<external>{%7} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%7} | |
%10 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%0, %1} : index | |
%11 = stream.tensor.encode on(#hal.device.affinity<@__device_0>) %4 : tensor<?x?xf32>{%0, %1} in !stream.resource<*>{%2} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%0, %1} in !stream.resource<*>{%10} | |
%12 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%5, %6} : index | |
%13 = stream.tensor.encode on(#hal.device.affinity<@__device_0>) %9 : tensor<?x?xf32>{%5, %6} in !stream.resource<*>{%7} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%5, %6} in !stream.resource<*>{%12} | |
%14 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?xf32>{%0, %6} : index | |
%15 = stream.tensor.dispatch on(#hal.device.affinity<@__device_0>) @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %5, %6](%11, %13, %0, %1, %5, %6) : (tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%0, %1} in !stream.resource<*>{%10}, tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%5, %6} in !stream.resource<*>{%12}, index, index, index, index) -> tensor<?x?xf32>{%0, %6} in !stream.resource<*>{%14} | |
%16 = stream.async.transfer %15 : !stream.resource<*>{%14} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%14} | |
%17 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %16 : tensor<?x?xf32>{%0, %6} in !stream.resource<external>{%14} -> !hal.buffer_view | |
util.return %17 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
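// CSE removed the duplicated hal.element_type<f32> and hal.encoding_type<dense_row_major> constants; the second hal.buffer_view.assert now reuses %element_type_f32 and %dense_row_major.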
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?xf32>{%0, %1} : index | |
%3 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%2} | |
%4 = stream.async.transfer %3 : !stream.resource<external>{%2} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%2} | |
%5 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%5, %6]) type(%element_type_f32) encoding(%dense_row_major) | |
%7 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?xf32>{%5, %6} : index | |
%8 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%5, %6} in !stream.resource<external>{%7} | |
%9 = stream.async.transfer %8 : !stream.resource<external>{%7} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%7} | |
%10 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%0, %1} : index | |
%11 = stream.tensor.encode on(#hal.device.affinity<@__device_0>) %4 : tensor<?x?xf32>{%0, %1} in !stream.resource<*>{%2} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%0, %1} in !stream.resource<*>{%10} | |
%12 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%5, %6} : index | |
%13 = stream.tensor.encode on(#hal.device.affinity<@__device_0>) %9 : tensor<?x?xf32>{%5, %6} in !stream.resource<*>{%7} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%5, %6} in !stream.resource<*>{%12} | |
%14 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?xf32>{%0, %6} : index | |
%15 = stream.tensor.dispatch on(#hal.device.affinity<@__device_0>) @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %5, %6](%11, %13, %0, %1, %5, %6) : (tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%0, %1} in !stream.resource<*>{%10}, tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%5, %6} in !stream.resource<*>{%12}, index, index, index, index) -> tensor<?x?xf32>{%0, %6} in !stream.resource<*>{%14} | |
%16 = stream.async.transfer %15 : !stream.resource<*>{%14} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%14} | |
%17 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %16 : tensor<?x?xf32>{%0, %6} in !stream.resource<external>{%14} -> !hal.buffer_view | |
util.return %17 : !hal.buffer_view | |
} | |
// -----// IR Dump After OptimizeIntArithmeticPass (iree-util-optimize-int-arithmetic) //----- // | |
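// No index arithmetic could be narrowed or folded; @foo is unchanged from the CSE output.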
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?xf32>{%0, %1} : index | |
%3 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%2} | |
%4 = stream.async.transfer %3 : !stream.resource<external>{%2} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%2} | |
%5 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%5, %6]) type(%element_type_f32) encoding(%dense_row_major) | |
%7 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?xf32>{%5, %6} : index | |
%8 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%5, %6} in !stream.resource<external>{%7} | |
%9 = stream.async.transfer %8 : !stream.resource<external>{%7} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%7} | |
%10 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%0, %1} : index | |
%11 = stream.tensor.encode on(#hal.device.affinity<@__device_0>) %4 : tensor<?x?xf32>{%0, %1} in !stream.resource<*>{%2} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%0, %1} in !stream.resource<*>{%10} | |
%12 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%5, %6} : index | |
%13 = stream.tensor.encode on(#hal.device.affinity<@__device_0>) %9 : tensor<?x?xf32>{%5, %6} in !stream.resource<*>{%7} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%5, %6} in !stream.resource<*>{%12} | |
%14 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?xf32>{%0, %6} : index | |
%15 = stream.tensor.dispatch on(#hal.device.affinity<@__device_0>) @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %5, %6](%11, %13, %0, %1, %5, %6) : (tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%0, %1} in !stream.resource<*>{%10}, tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%5, %6} in !stream.resource<*>{%12}, index, index, index, index) -> tensor<?x?xf32>{%0, %6} in !stream.resource<*>{%14} | |
%16 = stream.async.transfer %15 : !stream.resource<*>{%14} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%14} | |
%17 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %16 : tensor<?x?xf32>{%0, %6} in !stream.resource<external>{%14} -> !hal.buffer_view | |
util.return %17 : !hal.buffer_view | |
} | |
// -----// IR Dump After SimplifyGlobalAccessesPass (iree-util-simplify-global-accesses) //----- // | |
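// @foo contains no util.global.load/store ops to reorder, so the function is unchanged.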
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?xf32>{%0, %1} : index | |
%3 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%2} | |
%4 = stream.async.transfer %3 : !stream.resource<external>{%2} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%2} | |
%5 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%5, %6]) type(%element_type_f32) encoding(%dense_row_major) | |
%7 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?xf32>{%5, %6} : index | |
%8 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%5, %6} in !stream.resource<external>{%7} | |
%9 = stream.async.transfer %8 : !stream.resource<external>{%7} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%7} | |
%10 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%0, %1} : index | |
%11 = stream.tensor.encode on(#hal.device.affinity<@__device_0>) %4 : tensor<?x?xf32>{%0, %1} in !stream.resource<*>{%2} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%0, %1} in !stream.resource<*>{%10} | |
%12 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%5, %6} : index | |
%13 = stream.tensor.encode on(#hal.device.affinity<@__device_0>) %9 : tensor<?x?xf32>{%5, %6} in !stream.resource<*>{%7} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%5, %6} in !stream.resource<*>{%12} | |
%14 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?xf32>{%0, %6} : index | |
%15 = stream.tensor.dispatch on(#hal.device.affinity<@__device_0>) @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %5, %6](%11, %13, %0, %1, %5, %6) : (tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%0, %1} in !stream.resource<*>{%10}, tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%5, %6} in !stream.resource<*>{%12}, index, index, index, index) -> tensor<?x?xf32>{%0, %6} in !stream.resource<*>{%14} | |
%16 = stream.async.transfer %15 : !stream.resource<*>{%14} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%14} | |
%17 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %16 : tensor<?x?xf32>{%0, %6} in !stream.resource<external>{%14} -> !hal.buffer_view | |
util.return %17 : !hal.buffer_view | |
} | |
// -----// IR Dump After ApplyPatternsPass (iree-util-apply-patterns) //----- // | |
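// None of the util cleanup patterns applied; @foo is unchanged.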
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?xf32>{%0, %1} : index | |
%3 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%2} | |
%4 = stream.async.transfer %3 : !stream.resource<external>{%2} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%2} | |
%5 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%5, %6]) type(%element_type_f32) encoding(%dense_row_major) | |
%7 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?xf32>{%5, %6} : index | |
%8 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%5, %6} in !stream.resource<external>{%7} | |
%9 = stream.async.transfer %8 : !stream.resource<external>{%7} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%7} | |
%10 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%0, %1} : index | |
%11 = stream.tensor.encode on(#hal.device.affinity<@__device_0>) %4 : tensor<?x?xf32>{%0, %1} in !stream.resource<*>{%2} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%0, %1} in !stream.resource<*>{%10} | |
%12 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%5, %6} : index | |
%13 = stream.tensor.encode on(#hal.device.affinity<@__device_0>) %9 : tensor<?x?xf32>{%5, %6} in !stream.resource<*>{%7} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%5, %6} in !stream.resource<*>{%12} | |
%14 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?xf32>{%0, %6} : index | |
%15 = stream.tensor.dispatch on(#hal.device.affinity<@__device_0>) @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %5, %6](%11, %13, %0, %1, %5, %6) : (tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%0, %1} in !stream.resource<*>{%10}, tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>{%5, %6} in !stream.resource<*>{%12}, index, index, index, index) -> tensor<?x?xf32>{%0, %6} in !stream.resource<*>{%14} | |
%16 = stream.async.transfer %15 : !stream.resource<*>{%14} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%14} | |
%17 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %16 : tensor<?x?xf32>{%0, %6} in !stream.resource<external>{%14} -> !hal.buffer_view | |
util.return %17 : !hal.buffer_view | |
} | |
// -----// IR Dump After FoldGlobalsPass (iree-util-fold-globals) //----- // | |
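// Full-module dump: @__device_0 is the only global (private and immutable), so nothing folds and the IR is unchanged.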
#executable_target_embedded_elf_x86_64 = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>, max_stack_allocation_size = 32768 : i64, native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#map3 = affine_map<(d0, d1) -> (d0, d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64]> : !hal.device | |
#encoding = #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding1 = #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding2 = #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
stream.executable private @foo_dispatch_0 { | |
stream.executable.export public @foo_dispatch_0_matmul_DxDxD_f32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @foo_dispatch_0_matmul_DxDxD_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: index, %arg5: index, %arg6: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg5, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} | |
%5 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = stream.binding.subspan %arg6[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
%7 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} -> tensor<?x?xf32, #encoding> | |
%8 = iree_tensor_ext.dispatch.tensor.load %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} -> tensor<?x?xf32, #encoding1> | |
%9 = tensor.empty(%0, %3) : tensor<?x?xf32> | |
%10 = tensor.empty(%0, %3) : tensor<?x?xf32, #encoding2> | |
%11 = linalg.fill ins(%cst : f32) outs(%10 : tensor<?x?xf32, #encoding2>) -> tensor<?x?xf32, #encoding2> | |
%12 = linalg.matmul ins(%7, %8 : tensor<?x?xf32, #encoding>, tensor<?x?xf32, #encoding1>) outs(%11 : tensor<?x?xf32, #encoding2>) -> tensor<?x?xf32, #encoding2> | |
%13 = iree_encoding.unset_encoding %12 : tensor<?x?xf32, #encoding2> -> tensor<?x?xf32>{%0, %3} | |
%14 = linalg.generic {indexing_maps = [#map3, #map3], iterator_types = ["parallel", "parallel"]} ins(%13 : tensor<?x?xf32>) outs(%9 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%15 = arith.addf %in, %in : f32 | |
linalg.yield %15 : f32 | |
} -> tensor<?x?xf32> | |
iree_tensor_ext.dispatch.tensor.store %14, %6, offsets = [0, 0], sizes = [%0, %3], strides = [1, 1] : tensor<?x?xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
return | |
} | |
} | |
} | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?xf32>{%0, %1} : index | |
%3 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%2} | |
%4 = stream.async.transfer %3 : !stream.resource<external>{%2} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%2} | |
%5 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%5, %6]) type(%element_type_f32) encoding(%dense_row_major) | |
%7 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?xf32>{%5, %6} : index | |
%8 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%5, %6} in !stream.resource<external>{%7} | |
%9 = stream.async.transfer %8 : !stream.resource<external>{%7} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%7} | |
%10 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?xf32, #encoding>{%0, %1} : index | |
%11 = stream.tensor.encode on(#hal.device.affinity<@__device_0>) %4 : tensor<?x?xf32>{%0, %1} in !stream.resource<*>{%2} -> tensor<?x?xf32, #encoding>{%0, %1} in !stream.resource<*>{%10} | |
%12 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?xf32, #encoding1>{%5, %6} : index | |
%13 = stream.tensor.encode on(#hal.device.affinity<@__device_0>) %9 : tensor<?x?xf32>{%5, %6} in !stream.resource<*>{%7} -> tensor<?x?xf32, #encoding1>{%5, %6} in !stream.resource<*>{%12} | |
%14 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?xf32>{%0, %6} : index | |
%15 = stream.tensor.dispatch on(#hal.device.affinity<@__device_0>) @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %5, %6](%11, %13, %0, %1, %5, %6) : (tensor<?x?xf32, #encoding>{%0, %1} in !stream.resource<*>{%10}, tensor<?x?xf32, #encoding1>{%5, %6} in !stream.resource<*>{%12}, index, index, index, index) -> tensor<?x?xf32>{%0, %6} in !stream.resource<*>{%14} | |
%16 = stream.async.transfer %15 : !stream.resource<*>{%14} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%14} | |
%17 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %16 : tensor<?x?xf32>{%0, %6} in !stream.resource<external>{%14} -> !hal.buffer_view | |
util.return %17 : !hal.buffer_view | |
} | |
} | |
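// -----// Annotation (not part of the compiler output) //----- //
// At this point the whole program is one stream executable, @foo_dispatch_0, plus the public
// @foo wrapper. The dispatch loads the two encoded matmul operands, zero-fills the accumulator,
// runs linalg.matmul, strips the encoding, and doubles every element (arith.addf %in, %in).
// Ignoring the storage encodings, which only change how the operands are laid out in memory,
// the computation is equivalent to the NumPy sketch below (an illustration, not IREE code).

import numpy as np

def foo_dispatch_0(a: np.ndarray, b: np.ndarray) -> np.ndarray:
    # linalg.fill + linalg.matmul: accumulate A @ B into a zero-initialized f32 output.
    c = np.zeros((a.shape[0], b.shape[1]), dtype=np.float32)
    c += a.astype(np.float32) @ b.astype(np.float32)
    # linalg.generic with arith.addf %in, %in: elementwise doubling of the matmul result.
    return c + c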
// -----// IR Dump After FuseGlobalsPass (iree-util-fuse-globals) //----- // | |
#executable_target_embedded_elf_x86_64 = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>, max_stack_allocation_size = 32768 : i64, native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#map3 = affine_map<(d0, d1) -> (d0, d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64]> : !hal.device | |
#encoding = #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding1 = #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding2 = #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
stream.executable private @foo_dispatch_0 { | |
stream.executable.export public @foo_dispatch_0_matmul_DxDxD_f32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @foo_dispatch_0_matmul_DxDxD_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: index, %arg5: index, %arg6: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg5, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} | |
%5 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = stream.binding.subspan %arg6[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
%7 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} -> tensor<?x?xf32, #encoding> | |
%8 = iree_tensor_ext.dispatch.tensor.load %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} -> tensor<?x?xf32, #encoding1> | |
%9 = tensor.empty(%0, %3) : tensor<?x?xf32> | |
%10 = tensor.empty(%0, %3) : tensor<?x?xf32, #encoding2> | |
%11 = linalg.fill ins(%cst : f32) outs(%10 : tensor<?x?xf32, #encoding2>) -> tensor<?x?xf32, #encoding2> | |
%12 = linalg.matmul ins(%7, %8 : tensor<?x?xf32, #encoding>, tensor<?x?xf32, #encoding1>) outs(%11 : tensor<?x?xf32, #encoding2>) -> tensor<?x?xf32, #encoding2> | |
%13 = iree_encoding.unset_encoding %12 : tensor<?x?xf32, #encoding2> -> tensor<?x?xf32>{%0, %3} | |
%14 = linalg.generic {indexing_maps = [#map3, #map3], iterator_types = ["parallel", "parallel"]} ins(%13 : tensor<?x?xf32>) outs(%9 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%15 = arith.addf %in, %in : f32 | |
linalg.yield %15 : f32 | |
} -> tensor<?x?xf32> | |
iree_tensor_ext.dispatch.tensor.store %14, %6, offsets = [0, 0], sizes = [%0, %3], strides = [1, 1] : tensor<?x?xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
return | |
} | |
} | |
} | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?xf32>{%0, %1} : index | |
%3 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%2} | |
%4 = stream.async.transfer %3 : !stream.resource<external>{%2} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%2} | |
%5 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%5, %6]) type(%element_type_f32) encoding(%dense_row_major) | |
%7 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?xf32>{%5, %6} : index | |
%8 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%5, %6} in !stream.resource<external>{%7} | |
%9 = stream.async.transfer %8 : !stream.resource<external>{%7} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%7} | |
%10 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?xf32, #encoding>{%0, %1} : index | |
%11 = stream.tensor.encode on(#hal.device.affinity<@__device_0>) %4 : tensor<?x?xf32>{%0, %1} in !stream.resource<*>{%2} -> tensor<?x?xf32, #encoding>{%0, %1} in !stream.resource<*>{%10} | |
%12 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?xf32, #encoding1>{%5, %6} : index | |
%13 = stream.tensor.encode on(#hal.device.affinity<@__device_0>) %9 : tensor<?x?xf32>{%5, %6} in !stream.resource<*>{%7} -> tensor<?x?xf32, #encoding1>{%5, %6} in !stream.resource<*>{%12} | |
%14 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?xf32>{%0, %6} : index | |
%15 = stream.tensor.dispatch on(#hal.device.affinity<@__device_0>) @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %5, %6](%11, %13, %0, %1, %5, %6) : (tensor<?x?xf32, #encoding>{%0, %1} in !stream.resource<*>{%10}, tensor<?x?xf32, #encoding1>{%5, %6} in !stream.resource<*>{%12}, index, index, index, index) -> tensor<?x?xf32>{%0, %6} in !stream.resource<*>{%14} | |
%16 = stream.async.transfer %15 : !stream.resource<*>{%14} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%14} | |
%17 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %16 : tensor<?x?xf32>{%0, %6} in !stream.resource<external>{%14} -> !hal.buffer_view | |
util.return %17 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After IPOPass (iree-util-ipo) //----- // | |
#executable_target_embedded_elf_x86_64 = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>, max_stack_allocation_size = 32768 : i64, native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#map3 = affine_map<(d0, d1) -> (d0, d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64]> : !hal.device | |
#encoding = #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding1 = #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding2 = #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
stream.executable private @foo_dispatch_0 { | |
stream.executable.export public @foo_dispatch_0_matmul_DxDxD_f32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @foo_dispatch_0_matmul_DxDxD_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: index, %arg5: index, %arg6: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg5, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} | |
%5 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = stream.binding.subspan %arg6[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
%7 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} -> tensor<?x?xf32, #encoding> | |
%8 = iree_tensor_ext.dispatch.tensor.load %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} -> tensor<?x?xf32, #encoding1> | |
%9 = tensor.empty(%0, %3) : tensor<?x?xf32> | |
%10 = tensor.empty(%0, %3) : tensor<?x?xf32, #encoding2> | |
%11 = linalg.fill ins(%cst : f32) outs(%10 : tensor<?x?xf32, #encoding2>) -> tensor<?x?xf32, #encoding2> | |
%12 = linalg.matmul ins(%7, %8 : tensor<?x?xf32, #encoding>, tensor<?x?xf32, #encoding1>) outs(%11 : tensor<?x?xf32, #encoding2>) -> tensor<?x?xf32, #encoding2> | |
%13 = iree_encoding.unset_encoding %12 : tensor<?x?xf32, #encoding2> -> tensor<?x?xf32>{%0, %3} | |
%14 = linalg.generic {indexing_maps = [#map3, #map3], iterator_types = ["parallel", "parallel"]} ins(%13 : tensor<?x?xf32>) outs(%9 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%15 = arith.addf %in, %in : f32 | |
linalg.yield %15 : f32 | |
} -> tensor<?x?xf32> | |
iree_tensor_ext.dispatch.tensor.store %14, %6, offsets = [0, 0], sizes = [%0, %3], strides = [1, 1] : tensor<?x?xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
return | |
} | |
} | |
} | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?xf32>{%0, %1} : index | |
%3 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%2} | |
%4 = stream.async.transfer %3 : !stream.resource<external>{%2} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%2} | |
%5 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%5, %6]) type(%element_type_f32) encoding(%dense_row_major) | |
%7 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?xf32>{%5, %6} : index | |
%8 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%5, %6} in !stream.resource<external>{%7} | |
%9 = stream.async.transfer %8 : !stream.resource<external>{%7} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%7} | |
%10 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?xf32, #encoding>{%0, %1} : index | |
%11 = stream.tensor.encode on(#hal.device.affinity<@__device_0>) %4 : tensor<?x?xf32>{%0, %1} in !stream.resource<*>{%2} -> tensor<?x?xf32, #encoding>{%0, %1} in !stream.resource<*>{%10} | |
%12 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?xf32, #encoding1>{%5, %6} : index | |
%13 = stream.tensor.encode on(#hal.device.affinity<@__device_0>) %9 : tensor<?x?xf32>{%5, %6} in !stream.resource<*>{%7} -> tensor<?x?xf32, #encoding1>{%5, %6} in !stream.resource<*>{%12} | |
%14 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?xf32>{%0, %6} : index | |
%15 = stream.tensor.dispatch on(#hal.device.affinity<@__device_0>) @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %5, %6](%11, %13, %0, %1, %5, %6) : (tensor<?x?xf32, #encoding>{%0, %1} in !stream.resource<*>{%10}, tensor<?x?xf32, #encoding1>{%5, %6} in !stream.resource<*>{%12}, index, index, index, index) -> tensor<?x?xf32>{%0, %6} in !stream.resource<*>{%14} | |
%16 = stream.async.transfer %15 : !stream.resource<*>{%14} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%14} | |
%17 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %16 : tensor<?x?xf32>{%0, %6} in !stream.resource<external>{%14} -> !hal.buffer_view | |
util.return %17 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After CombineInitializersPass (iree-util-combine-initializers) //----- // | |
#executable_target_embedded_elf_x86_64 = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>, max_stack_allocation_size = 32768 : i64, native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#map3 = affine_map<(d0, d1) -> (d0, d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64]> : !hal.device | |
#encoding = #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding1 = #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding2 = #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
stream.executable private @foo_dispatch_0 { | |
stream.executable.export public @foo_dispatch_0_matmul_DxDxD_f32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @foo_dispatch_0_matmul_DxDxD_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: index, %arg5: index, %arg6: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg5, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} | |
%5 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = stream.binding.subspan %arg6[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
%7 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} -> tensor<?x?xf32, #encoding> | |
%8 = iree_tensor_ext.dispatch.tensor.load %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} -> tensor<?x?xf32, #encoding1> | |
%9 = tensor.empty(%0, %3) : tensor<?x?xf32> | |
%10 = tensor.empty(%0, %3) : tensor<?x?xf32, #encoding2> | |
%11 = linalg.fill ins(%cst : f32) outs(%10 : tensor<?x?xf32, #encoding2>) -> tensor<?x?xf32, #encoding2> | |
%12 = linalg.matmul ins(%7, %8 : tensor<?x?xf32, #encoding>, tensor<?x?xf32, #encoding1>) outs(%11 : tensor<?x?xf32, #encoding2>) -> tensor<?x?xf32, #encoding2> | |
%13 = iree_encoding.unset_encoding %12 : tensor<?x?xf32, #encoding2> -> tensor<?x?xf32>{%0, %3} | |
%14 = linalg.generic {indexing_maps = [#map3, #map3], iterator_types = ["parallel", "parallel"]} ins(%13 : tensor<?x?xf32>) outs(%9 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%15 = arith.addf %in, %in : f32 | |
linalg.yield %15 : f32 | |
} -> tensor<?x?xf32> | |
iree_tensor_ext.dispatch.tensor.store %14, %6, offsets = [0, 0], sizes = [%0, %3], strides = [1, 1] : tensor<?x?xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
return | |
} | |
} | |
} | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?xf32>{%0, %1} : index | |
%3 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%2} | |
%4 = stream.async.transfer %3 : !stream.resource<external>{%2} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%2} | |
%5 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%5, %6]) type(%element_type_f32) encoding(%dense_row_major) | |
%7 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?xf32>{%5, %6} : index | |
%8 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%5, %6} in !stream.resource<external>{%7} | |
%9 = stream.async.transfer %8 : !stream.resource<external>{%7} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%7} | |
%10 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?xf32, #encoding>{%0, %1} : index | |
%11 = stream.tensor.encode on(#hal.device.affinity<@__device_0>) %4 : tensor<?x?xf32>{%0, %1} in !stream.resource<*>{%2} -> tensor<?x?xf32, #encoding>{%0, %1} in !stream.resource<*>{%10} | |
%12 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?xf32, #encoding1>{%5, %6} : index | |
%13 = stream.tensor.encode on(#hal.device.affinity<@__device_0>) %9 : tensor<?x?xf32>{%5, %6} in !stream.resource<*>{%7} -> tensor<?x?xf32, #encoding1>{%5, %6} in !stream.resource<*>{%12} | |
%14 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?xf32>{%0, %6} : index | |
%15 = stream.tensor.dispatch on(#hal.device.affinity<@__device_0>) @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %5, %6](%11, %13, %0, %1, %5, %6) : (tensor<?x?xf32, #encoding>{%0, %1} in !stream.resource<*>{%10}, tensor<?x?xf32, #encoding1>{%5, %6} in !stream.resource<*>{%12}, index, index, index, index) -> tensor<?x?xf32>{%0, %6} in !stream.resource<*>{%14} | |
%16 = stream.async.transfer %15 : !stream.resource<*>{%14} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%14} | |
%17 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %16 : tensor<?x?xf32>{%0, %6} in !stream.resource<external>{%14} -> !hal.buffer_view | |
util.return %17 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After SpecializeEncodingsPass (iree-stream-specialize-encodings) //----- // | |
#encoding = #iree_encoding.layout<[#iree_cpu.cpu_encoding_layout<configuration = {encoding_info = {innerDimsPos = [0, 1], innerTileSizes = [8, 1], outerDimsPerm = [0, 1]}}>]> | |
#encoding1 = #iree_encoding.layout<[#iree_cpu.cpu_encoding_layout<configuration = {encoding_info = {innerDimsPos = [1, 0], innerTileSizes = [4, 1], outerDimsPerm = [1, 0]}}>]> | |
#executable_target_embedded_elf_x86_64 = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>, max_stack_allocation_size = 32768 : i64, native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#map3 = affine_map<(d0, d1) -> (d0, d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64]> : !hal.device | |
#encoding2 = #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding3 = #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding4 = #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
stream.executable private @foo_dispatch_0 { | |
stream.executable.export public @foo_dispatch_0_matmul_DxDxD_f32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @foo_dispatch_0_matmul_DxDxD_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: index, %arg5: index, %arg6: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg5, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} | |
%5 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = stream.binding.subspan %arg6[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
%7 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} -> tensor<?x?xf32, #encoding2> | |
%8 = iree_tensor_ext.dispatch.tensor.load %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} -> tensor<?x?xf32, #encoding3> | |
%9 = tensor.empty(%0, %3) : tensor<?x?xf32> | |
%10 = tensor.empty(%0, %3) : tensor<?x?xf32, #encoding4> | |
%11 = linalg.fill ins(%cst : f32) outs(%10 : tensor<?x?xf32, #encoding4>) -> tensor<?x?xf32, #encoding4> | |
%12 = linalg.matmul ins(%7, %8 : tensor<?x?xf32, #encoding2>, tensor<?x?xf32, #encoding3>) outs(%11 : tensor<?x?xf32, #encoding4>) -> tensor<?x?xf32, #encoding4> | |
%13 = iree_encoding.unset_encoding %12 : tensor<?x?xf32, #encoding4> -> tensor<?x?xf32>{%0, %3} | |
%14 = linalg.generic {indexing_maps = [#map3, #map3], iterator_types = ["parallel", "parallel"]} ins(%13 : tensor<?x?xf32>) outs(%9 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%15 = arith.addf %in, %in : f32 | |
linalg.yield %15 : f32 | |
} -> tensor<?x?xf32> | |
iree_tensor_ext.dispatch.tensor.store %14, %6, offsets = [0, 0], sizes = [%0, %3], strides = [1, 1] : tensor<?x?xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
return | |
} | |
} | |
} | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?xf32>{%0, %1} : index | |
%3 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%2} | |
%4 = stream.async.transfer %3 : !stream.resource<external>{%2} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%2} | |
%5 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%5, %6]) type(%element_type_f32) encoding(%dense_row_major) | |
%7 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?xf32>{%5, %6} : index | |
%8 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%5, %6} in !stream.resource<external>{%7} | |
%9 = stream.async.transfer %8 : !stream.resource<external>{%7} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%7} | |
%10 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?xf32, #encoding>{%0, %1} : index | |
%11 = stream.tensor.encode on(#hal.device.affinity<@__device_0>) %4 : tensor<?x?xf32>{%0, %1} in !stream.resource<*>{%2} -> tensor<?x?xf32, #encoding>{%0, %1} in !stream.resource<*>{%10} | |
%12 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?xf32, #encoding1>{%5, %6} : index | |
%13 = stream.tensor.encode on(#hal.device.affinity<@__device_0>) %9 : tensor<?x?xf32>{%5, %6} in !stream.resource<*>{%7} -> tensor<?x?xf32, #encoding1>{%5, %6} in !stream.resource<*>{%12} | |
%14 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?xf32>{%0, %6} : index | |
%15 = stream.tensor.dispatch on(#hal.device.affinity<@__device_0>) @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %5, %6](%11, %13, %0, %1, %5, %6) : (tensor<?x?xf32, #encoding>{%0, %1} in !stream.resource<*>{%10}, tensor<?x?xf32, #encoding1>{%5, %6} in !stream.resource<*>{%12}, index, index, index, index) -> tensor<?x?xf32>{%0, %6} in !stream.resource<*>{%14} | |
%16 = stream.async.transfer %15 : !stream.resource<*>{%14} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%14} | |
%17 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %16 : tensor<?x?xf32>{%0, %6} in !stream.resource<external>{%14} -> !hal.buffer_view | |
util.return %17 : !hal.buffer_view | |
} | |
} | |
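// -----// Annotation (not part of the compiler output) //----- //
// SpecializeEncodingsPass resolved the abstract matmul encodings seen by the stream ops into
// concrete CPU layouts: the LHS (#encoding) packs dim 0 into tiles of 8 (innerDimsPos = [0, 1],
// innerTileSizes = [8, 1], outerDimsPerm = [0, 1]), while the RHS (#encoding1) is transposed and
// packs dim 1 into tiles of 4 (innerDimsPos = [1, 0], innerTileSizes = [4, 1],
// outerDimsPerm = [1, 0]). The sketch below shows how such parameters relayout a 2-D array in the
// style of tensor.pack; helper names are made up and this is an illustration, not IREE's code.

import numpy as np

def pack_2d(x, inner_dims_pos, inner_tile_sizes, outer_dims_perm, pad_value=0.0):
    # Pad each tiled dim up to a multiple of its tile size, split it into an (outer, inner)
    # pair, order the outer dims by outer_dims_perm, and append the inner tile dims.
    d0, d1 = x.shape
    tiles = dict(zip(inner_dims_pos, inner_tile_sizes))
    t0, t1 = tiles.get(0, 1), tiles.get(1, 1)
    p0, p1 = -(-d0 // t0) * t0, -(-d1 // t1) * t1  # round up to tile multiples
    padded = np.full((p0, p1), pad_value, dtype=x.dtype)
    padded[:d0, :d1] = x
    y = padded.reshape(p0 // t0, t0, p1 // t1, t1)  # axes: (outer0, inner0, outer1, inner1)
    outer, inner = [0, 2], [1, 3]
    order = [outer[d] for d in outer_dims_perm] + [inner[d] for d in inner_dims_pos]
    return np.transpose(y, order)

# LHS layout above -> shape (ceil(M/8), K, 8, 1); RHS layout above -> shape (ceil(N/4), K, 4, 1).
a_packed = pack_2d(np.ones((5, 7), np.float32), [0, 1], [8, 1], [0, 1])  # -> (1, 7, 8, 1)
b_packed = pack_2d(np.ones((7, 3), np.float32), [1, 0], [4, 1], [1, 0])  # -> (1, 7, 4, 1)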
// -----// IR Dump After EncodeDeviceTensorsPass (iree-stream-encode-device-tensors) //----- // | |
stream.executable private @foo_dispatch_0 { | |
stream.executable.export public @foo_dispatch_0_matmul_DxDxD_f32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @foo_dispatch_0_matmul_DxDxD_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: index, %arg5: index, %arg6: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg5, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #iree_encoding.layout<[#iree_cpu.cpu_encoding_layout<configuration = {encoding_info = {innerDimsPos = [0, 1], innerTileSizes = [8, 1], outerDimsPerm = [0, 1]}}>]>>>{%0, %1} | |
%5 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #iree_encoding.layout<[#iree_cpu.cpu_encoding_layout<configuration = {encoding_info = {innerDimsPos = [1, 0], innerTileSizes = [4, 1], outerDimsPerm = [1, 0]}}>]>>>{%2, %3} | |
%6 = stream.binding.subspan %arg6[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
%7 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #iree_encoding.layout<[#iree_cpu.cpu_encoding_layout<configuration = {encoding_info = {innerDimsPos = [0, 1], innerTileSizes = [8, 1], outerDimsPerm = [0, 1]}}>]>>>{%0, %1} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%8 = iree_tensor_ext.dispatch.tensor.load %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #iree_encoding.layout<[#iree_cpu.cpu_encoding_layout<configuration = {encoding_info = {innerDimsPos = [1, 0], innerTileSizes = [4, 1], outerDimsPerm = [1, 0]}}>]>>>{%2, %3} -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%9 = tensor.empty(%0, %3) : tensor<?x?xf32> | |
%10 = tensor.empty(%0, %3) : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%11 = linalg.fill ins(%cst : f32) outs(%10 : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>) -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%12 = linalg.matmul ins(%7, %8 : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>, tensor<?x?xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>) outs(%11 : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>>) -> tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> | |
%13 = iree_encoding.unset_encoding %12 : tensor<?x?xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [?, ?, ?]>> -> tensor<?x?xf32>{%0, %3} | |
%14 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%13 : tensor<?x?xf32>) outs(%9 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%15 = arith.addf %in, %in : f32 | |
linalg.yield %15 : f32 | |
} -> tensor<?x?xf32> | |
iree_tensor_ext.dispatch.tensor.store %14, %6, offsets = [0, 0], sizes = [%0, %3], strides = [1, 1] : tensor<?x?xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
return | |
} | |
} | |
} | |
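// -----// Annotation (not part of the compiler output) //----- //
// Inside the device executable the binding types now carry the resolved #iree_encoding.layout,
// while the values produced by dispatch.tensor.load still carry the abstract matmul encoding; the
// load is where the concrete buffer layout meets the encoding-aware linalg ops. Assuming the
// row-major 4-D packed shape implied by the LHS layout, (ceil(M/8), K, 8, 1), the flat element
// index of logical element (i, k) would be computed as below. This only illustrates the layout;
// it is not a statement of IREE's buffer ABI.

def packed_lhs_flat_index(i: int, k: int, K: int, tile: int = 8) -> int:
    # Packed LHS shape (ceil(M/tile), K, tile, 1), row-major:
    # flat = ((outer_i * K) + k) * tile + inner_i
    outer_i, inner_i = divmod(i, tile)
    return (outer_i * K + k) * tile + inner_i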
// -----// IR Dump After EncodeHostTensorsPass (iree-stream-encode-host-tensors) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c8 = arith.constant 8 : index | |
%c4 = arith.constant 4 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c4 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3} | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%7 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%6, %7]) type(%element_type_f32) encoding(%dense_row_major) | |
%8 = arith.muli %6, %c4 : index | |
%9 = arith.muli %8, %7 : index | |
%10 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%6, %7} in !stream.resource<external>{%9} | |
%11 = stream.async.transfer %10 : !stream.resource<external>{%9} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%9} | |
%12 = arith.ceildivsi %0, %c8 : index | |
%13 = arith.muli %12, %c8 : index | |
%14 = arith.muli %13, %c4 : index | |
%15 = arith.muli %14, %1 : index | |
%16 = stream.tensor.encode on(#hal.device.affinity<@__device_0>) %5 : tensor<?x?xf32>{%0, %1} in !stream.resource<*>{%3} -> tensor<?x?xf32, #iree_encoding.layout<[#iree_cpu.cpu_encoding_layout<configuration = {encoding_info = {innerDimsPos = [0, 1], innerTileSizes = [8, 1], outerDimsPerm = [0, 1]}}>]>>{%0, %1} in !stream.resource<*>{%15} | |
%17 = arith.ceildivsi %7, %c4 : index | |
%18 = arith.muli %17, %c4 : index | |
%19 = arith.muli %6, %c4 : index | |
%20 = arith.muli %19, %18 : index | |
%21 = stream.tensor.encode on(#hal.device.affinity<@__device_0>) %11 : tensor<?x?xf32>{%6, %7} in !stream.resource<*>{%9} -> tensor<?x?xf32, #iree_encoding.layout<[#iree_cpu.cpu_encoding_layout<configuration = {encoding_info = {innerDimsPos = [1, 0], innerTileSizes = [4, 1], outerDimsPerm = [1, 0]}}>]>>{%6, %7} in !stream.resource<*>{%20} | |
%22 = arith.muli %0, %c4 : index | |
%23 = arith.muli %22, %7 : index | |
%24 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %6, %7](%16[%c0 to %15 for %15], %21[%c0 to %20 for %20], %0, %1, %6, %7) : (!stream.resource<*>{%15}, !stream.resource<*>{%20}, index, index, index, index) -> !stream.resource<*>{%23} | |
%25 = stream.async.transfer %24 : !stream.resource<*>{%23} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%23} | |
%26 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %25 : tensor<?x?xf32>{%0, %7} in !stream.resource<external>{%23} -> !hal.buffer_view | |
util.return %26 : !hal.buffer_view | |
} | |
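// -----// Annotation (not part of the compiler output) //----- //
// EncodeHostTensorsPass lowered the host-side stream.tensor.sizeof ops into explicit index
// arithmetic using the resolved layouts: the f32 LHS buffer holds ceildiv(M, 8) * 8 rows of K
// elements (4 bytes each), and the RHS buffer holds K rows of ceildiv(N, 4) * 4 elements. The
// helpers below restate that arithmetic (names are mine; the byte counts match %12..%15 and
// %17..%20 above).

def ceil_to(x: int, m: int) -> int:
    return -(-x // m) * m

def lhs_bytes(m: int, k: int, elem_bytes: int = 4, tile: int = 8) -> int:
    # %15 = (ceildiv(m, 8) * 8) * 4 * k
    return ceil_to(m, tile) * elem_bytes * k

def rhs_bytes(k: int, n: int, elem_bytes: int = 4, tile: int = 4) -> int:
    # %20 = (k * 4) * (ceildiv(n, 4) * 4)
    return k * elem_bytes * ceil_to(n, tile)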
// -----// IR Dump After MaterializeEncodingsPass (iree-stream-materialize-encodings) //----- // | |
#encoding = #iree_encoding.layout<[#iree_cpu.cpu_encoding_layout<configuration = {encoding_info = {innerDimsPos = [0, 1], innerTileSizes = [8, 1], outerDimsPerm = [0, 1]}}>]> | |
#encoding1 = #iree_encoding.layout<[#iree_cpu.cpu_encoding_layout<configuration = {encoding_info = {innerDimsPos = [1, 0], innerTileSizes = [4, 1], outerDimsPerm = [1, 0]}}>]> | |
#executable_target_embedded_elf_x86_64 = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>, max_stack_allocation_size = 32768 : i64, native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#map3 = affine_map<(d0, d1) -> (d0, d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64]> : !hal.device | |
#encoding2 = #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding3 = #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding4 = #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
stream.executable private @foo_dispatch_0 { | |
stream.executable.export public @foo_dispatch_0_matmul_DxDxD_f32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @foo_dispatch_0_matmul_DxDxD_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: index, %arg5: index, %arg6: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg5, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} | |
%5 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = stream.binding.subspan %arg6[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
%7 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} -> tensor<?x?xf32, #encoding2> | |
%8 = iree_tensor_ext.dispatch.tensor.load %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} -> tensor<?x?xf32, #encoding3> | |
%9 = tensor.empty(%0, %3) : tensor<?x?xf32> | |
%10 = tensor.empty(%0, %3) : tensor<?x?xf32, #encoding4> | |
%11 = linalg.fill ins(%cst : f32) outs(%10 : tensor<?x?xf32, #encoding4>) -> tensor<?x?xf32, #encoding4> | |
%12 = linalg.matmul ins(%7, %8 : tensor<?x?xf32, #encoding2>, tensor<?x?xf32, #encoding3>) outs(%11 : tensor<?x?xf32, #encoding4>) -> tensor<?x?xf32, #encoding4> | |
%13 = iree_encoding.unset_encoding %12 : tensor<?x?xf32, #encoding4> -> tensor<?x?xf32>{%0, %3} | |
%14 = linalg.generic {indexing_maps = [#map3, #map3], iterator_types = ["parallel", "parallel"]} ins(%13 : tensor<?x?xf32>) outs(%9 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%15 = arith.addf %in, %in : f32 | |
linalg.yield %15 : f32 | |
} -> tensor<?x?xf32> | |
iree_tensor_ext.dispatch.tensor.store %14, %6, offsets = [0, 0], sizes = [%0, %3], strides = [1, 1] : tensor<?x?xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
return | |
} | |
} | |
} | |
stream.executable private @_encoding_0 { | |
stream.executable.export public @_encoding_0_encode_DxDxf32_to_DxDxf32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @_encoding_0_encode_DxDxf32_to_DxDxf32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 3 : index | |
%c0 = arith.constant 0 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} | |
%5 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding>>{%2, %3} | |
%6 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} -> tensor<?x?xf32> | |
%7 = iree_encoding.set_encoding %6 : tensor<?x?xf32> -> tensor<?x?xf32, #encoding> | |
iree_tensor_ext.dispatch.tensor.store %7, %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : tensor<?x?xf32, #encoding> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding>>{%2, %3} | |
return | |
} | |
} | |
} | |
stream.executable private @_encoding_1 { | |
stream.executable.export public @_encoding_1_encode_DxDxf32_to_DxDxf32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @_encoding_1_encode_DxDxf32_to_DxDxf32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 3 : index | |
%c0 = arith.constant 0 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} | |
%5 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} -> tensor<?x?xf32> | |
%7 = iree_encoding.set_encoding %6 : tensor<?x?xf32> -> tensor<?x?xf32, #encoding1> | |
iree_tensor_ext.dispatch.tensor.store %7, %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : tensor<?x?xf32, #encoding1> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
return | |
} | |
} | |
} | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c8 = arith.constant 8 : index | |
%c4 = arith.constant 4 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c4 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3} | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%7 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%6, %7]) type(%element_type_f32) encoding(%dense_row_major) | |
%8 = arith.muli %6, %c4 : index | |
%9 = arith.muli %8, %7 : index | |
%10 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%6, %7} in !stream.resource<external>{%9} | |
%11 = stream.async.transfer %10 : !stream.resource<external>{%9} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%9} | |
%12 = arith.ceildivsi %0, %c8 : index | |
%13 = arith.muli %12, %c8 : index | |
%14 = arith.muli %13, %c4 : index | |
%15 = arith.muli %14, %1 : index | |
%c0_0 = arith.constant 0 : index | |
%16 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_encoding_0::@_encoding_0_encode_DxDxf32_to_DxDxf32[%0, %1, %0, %1](%5[%c0_0 to %3 for %3], %0, %1, %0, %1) : (!stream.resource<*>{%3}, index, index, index, index) -> !stream.resource<*>{%15} | |
%17 = arith.ceildivsi %7, %c4 : index | |
%18 = arith.muli %17, %c4 : index | |
%19 = arith.muli %6, %c4 : index | |
%20 = arith.muli %19, %18 : index | |
%c0_1 = arith.constant 0 : index | |
%21 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_encoding_1::@_encoding_1_encode_DxDxf32_to_DxDxf32[%6, %7, %6, %7](%11[%c0_1 to %9 for %9], %6, %7, %6, %7) : (!stream.resource<*>{%9}, index, index, index, index) -> !stream.resource<*>{%20} | |
%22 = arith.muli %0, %c4 : index | |
%23 = arith.muli %22, %7 : index | |
%24 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %6, %7](%16[%c0 to %15 for %15], %21[%c0 to %20 for %20], %0, %1, %6, %7) : (!stream.resource<*>{%15}, !stream.resource<*>{%20}, index, index, index, index) -> !stream.resource<*>{%23} | |
%25 = stream.async.transfer %24 : !stream.resource<*>{%23} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%23} | |
%26 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %25 : tensor<?x?xf32>{%0, %7} in !stream.resource<external>{%23} -> !hal.buffer_view | |
util.return %26 : !hal.buffer_view | |
} | |
} | |
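// -----// Annotation (not part of the compiler output) //----- //
// MaterializeEncodingsPass turned the two stream.tensor.encode ops into dedicated stream
// executables, @_encoding_0 and @_encoding_1, whose bodies simply iree_encoding.set_encoding the
// imported row-major tensors; the host now chains three ordinary stream.async.dispatch ops. The
// runnable sketch below mimics that dataflow, using plain zero-padding as a stand-in for the
// actual tile relayout (function names are mine, not IREE APIs).

import numpy as np

def encode_lhs(a):           # stand-in for @_encoding_0: pad rows of A to a multiple of 8
    return np.pad(a, ((0, (-a.shape[0]) % 8), (0, 0)))

def encode_rhs(b):           # stand-in for @_encoding_1: pad columns of B to a multiple of 4
    return np.pad(b, ((0, 0), (0, (-b.shape[1]) % 4)))

def matmul_dispatch(a_enc, b_enc, m, n):  # stand-in for @foo_dispatch_0: matmul + doubling
    return 2.0 * (a_enc @ b_enc)[:m, :n]

a = np.random.rand(5, 7).astype(np.float32)
b = np.random.rand(7, 3).astype(np.float32)
out = matmul_dispatch(encode_lhs(a), encode_rhs(b), 5, 3)
assert np.allclose(out, 2.0 * (a @ b), atol=1e-5)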
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%c32 = arith.constant 32 : index | |
%c0 = arith.constant 0 : index | |
%c8 = arith.constant 8 : index | |
%c4 = arith.constant 4 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c4 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3} | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%7 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%6, %7]) type(%element_type_f32) encoding(%dense_row_major) | |
%8 = arith.muli %6, %c4 : index | |
%9 = arith.muli %8, %7 : index | |
%10 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%6, %7} in !stream.resource<external>{%9} | |
%11 = stream.async.transfer %10 : !stream.resource<external>{%9} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%9} | |
%12 = arith.ceildivsi %0, %c8 : index | |
%13 = arith.muli %12, %c32 : index | |
%14 = arith.muli %13, %1 : index | |
%15 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_encoding_0::@_encoding_0_encode_DxDxf32_to_DxDxf32[%0, %1, %0, %1](%5[%c0 to %3 for %3], %0, %1, %0, %1) : (!stream.resource<*>{%3}, index, index, index, index) -> !stream.resource<*>{%14} | |
%16 = arith.ceildivsi %7, %c4 : index | |
%17 = arith.muli %16, %c4 : index | |
%18 = arith.muli %6, %c4 : index | |
%19 = arith.muli %18, %17 : index | |
%20 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_encoding_1::@_encoding_1_encode_DxDxf32_to_DxDxf32[%6, %7, %6, %7](%11[%c0 to %9 for %9], %6, %7, %6, %7) : (!stream.resource<*>{%9}, index, index, index, index) -> !stream.resource<*>{%19} | |
%21 = arith.muli %0, %c4 : index | |
%22 = arith.muli %21, %7 : index | |
%23 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %6, %7](%15[%c0 to %14 for %14], %20[%c0 to %19 for %19], %0, %1, %6, %7) : (!stream.resource<*>{%14}, !stream.resource<*>{%19}, index, index, index, index) -> !stream.resource<*>{%22} | |
%24 = stream.async.transfer %23 : !stream.resource<*>{%22} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%22} | |
%25 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %24 : tensor<?x?xf32>{%0, %7} in !stream.resource<external>{%22} -> !hal.buffer_view | |
util.return %25 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
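// CSE deduplicates the byte-size arithmetic: the repeated muli %6, %c4 and
// muli %0, %c4 are dropped in favor of the earlier %8 and %2, so the RHS
// encoding size (%18) and the matmul result size (%20) now reuse those values.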
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%c32 = arith.constant 32 : index | |
%c0 = arith.constant 0 : index | |
%c8 = arith.constant 8 : index | |
%c4 = arith.constant 4 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c4 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3} | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%7 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%6, %7]) type(%element_type_f32) encoding(%dense_row_major) | |
%8 = arith.muli %6, %c4 : index | |
%9 = arith.muli %8, %7 : index | |
%10 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%6, %7} in !stream.resource<external>{%9} | |
%11 = stream.async.transfer %10 : !stream.resource<external>{%9} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%9} | |
%12 = arith.ceildivsi %0, %c8 : index | |
%13 = arith.muli %12, %c32 : index | |
%14 = arith.muli %13, %1 : index | |
%15 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_encoding_0::@_encoding_0_encode_DxDxf32_to_DxDxf32[%0, %1, %0, %1](%5[%c0 to %3 for %3], %0, %1, %0, %1) : (!stream.resource<*>{%3}, index, index, index, index) -> !stream.resource<*>{%14} | |
%16 = arith.ceildivsi %7, %c4 : index | |
%17 = arith.muli %16, %c4 : index | |
%18 = arith.muli %8, %17 : index | |
%19 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_encoding_1::@_encoding_1_encode_DxDxf32_to_DxDxf32[%6, %7, %6, %7](%11[%c0 to %9 for %9], %6, %7, %6, %7) : (!stream.resource<*>{%9}, index, index, index, index) -> !stream.resource<*>{%18} | |
%20 = arith.muli %2, %7 : index | |
%21 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %6, %7](%15[%c0 to %14 for %14], %19[%c0 to %18 for %18], %0, %1, %6, %7) : (!stream.resource<*>{%14}, !stream.resource<*>{%18}, index, index, index, index) -> !stream.resource<*>{%20} | |
%22 = stream.async.transfer %21 : !stream.resource<*>{%20} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%20} | |
%23 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %22 : tensor<?x?xf32>{%0, %7} in !stream.resource<external>{%20} -> !hal.buffer_view | |
util.return %23 : !hal.buffer_view | |
} | |
// -----// IR Dump After OptimizeIntArithmeticPass (iree-util-optimize-int-arithmetic) //----- // | |
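// This pass leaves @foo unchanged; the dump is identical to the previous (CSE)
// dump, including the ceildivsi/muli chains that compute the padded resource sizes.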
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%c32 = arith.constant 32 : index | |
%c0 = arith.constant 0 : index | |
%c8 = arith.constant 8 : index | |
%c4 = arith.constant 4 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c4 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3} | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%7 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%6, %7]) type(%element_type_f32) encoding(%dense_row_major) | |
%8 = arith.muli %6, %c4 : index | |
%9 = arith.muli %8, %7 : index | |
%10 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%6, %7} in !stream.resource<external>{%9} | |
%11 = stream.async.transfer %10 : !stream.resource<external>{%9} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%9} | |
%12 = arith.ceildivsi %0, %c8 : index | |
%13 = arith.muli %12, %c32 : index | |
%14 = arith.muli %13, %1 : index | |
%15 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_encoding_0::@_encoding_0_encode_DxDxf32_to_DxDxf32[%0, %1, %0, %1](%5[%c0 to %3 for %3], %0, %1, %0, %1) : (!stream.resource<*>{%3}, index, index, index, index) -> !stream.resource<*>{%14} | |
%16 = arith.ceildivsi %7, %c4 : index | |
%17 = arith.muli %16, %c4 : index | |
%18 = arith.muli %8, %17 : index | |
%19 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_encoding_1::@_encoding_1_encode_DxDxf32_to_DxDxf32[%6, %7, %6, %7](%11[%c0 to %9 for %9], %6, %7, %6, %7) : (!stream.resource<*>{%9}, index, index, index, index) -> !stream.resource<*>{%18} | |
%20 = arith.muli %2, %7 : index | |
%21 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %6, %7](%15[%c0 to %14 for %14], %19[%c0 to %18 for %18], %0, %1, %6, %7) : (!stream.resource<*>{%14}, !stream.resource<*>{%18}, index, index, index, index) -> !stream.resource<*>{%20} | |
%22 = stream.async.transfer %21 : !stream.resource<*>{%20} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%20} | |
%23 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %22 : tensor<?x?xf32>{%0, %7} in !stream.resource<external>{%20} -> !hal.buffer_view | |
util.return %23 : !hal.buffer_view | |
} | |
// -----// IR Dump After SimplifyGlobalAccessesPass (iree-util-simplify-global-accesses) //----- // | |
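// Identical to the previous dump: @foo contains no util.global.load/store ops
// (the device is referenced only through #hal.device.affinity attributes), so
// there are no global accesses for this pass to reorder or simplify.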
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%c32 = arith.constant 32 : index | |
%c0 = arith.constant 0 : index | |
%c8 = arith.constant 8 : index | |
%c4 = arith.constant 4 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c4 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3} | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%7 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%6, %7]) type(%element_type_f32) encoding(%dense_row_major) | |
%8 = arith.muli %6, %c4 : index | |
%9 = arith.muli %8, %7 : index | |
%10 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%6, %7} in !stream.resource<external>{%9} | |
%11 = stream.async.transfer %10 : !stream.resource<external>{%9} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%9} | |
%12 = arith.ceildivsi %0, %c8 : index | |
%13 = arith.muli %12, %c32 : index | |
%14 = arith.muli %13, %1 : index | |
%15 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_encoding_0::@_encoding_0_encode_DxDxf32_to_DxDxf32[%0, %1, %0, %1](%5[%c0 to %3 for %3], %0, %1, %0, %1) : (!stream.resource<*>{%3}, index, index, index, index) -> !stream.resource<*>{%14} | |
%16 = arith.ceildivsi %7, %c4 : index | |
%17 = arith.muli %16, %c4 : index | |
%18 = arith.muli %8, %17 : index | |
%19 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_encoding_1::@_encoding_1_encode_DxDxf32_to_DxDxf32[%6, %7, %6, %7](%11[%c0 to %9 for %9], %6, %7, %6, %7) : (!stream.resource<*>{%9}, index, index, index, index) -> !stream.resource<*>{%18} | |
%20 = arith.muli %2, %7 : index | |
%21 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %6, %7](%15[%c0 to %14 for %14], %19[%c0 to %18 for %18], %0, %1, %6, %7) : (!stream.resource<*>{%14}, !stream.resource<*>{%18}, index, index, index, index) -> !stream.resource<*>{%20} | |
%22 = stream.async.transfer %21 : !stream.resource<*>{%20} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%20} | |
%23 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %22 : tensor<?x?xf32>{%0, %7} in !stream.resource<external>{%20} -> !hal.buffer_view | |
util.return %23 : !hal.buffer_view | |
} | |
// -----// IR Dump After ApplyPatternsPass (iree-util-apply-patterns) //----- // | |
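// No changes: the util-dialect pattern set finds nothing further to rewrite in @foo.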
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%c32 = arith.constant 32 : index | |
%c0 = arith.constant 0 : index | |
%c8 = arith.constant 8 : index | |
%c4 = arith.constant 4 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c4 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3} | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%7 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%6, %7]) type(%element_type_f32) encoding(%dense_row_major) | |
%8 = arith.muli %6, %c4 : index | |
%9 = arith.muli %8, %7 : index | |
%10 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%6, %7} in !stream.resource<external>{%9} | |
%11 = stream.async.transfer %10 : !stream.resource<external>{%9} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%9} | |
%12 = arith.ceildivsi %0, %c8 : index | |
%13 = arith.muli %12, %c32 : index | |
%14 = arith.muli %13, %1 : index | |
%15 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_encoding_0::@_encoding_0_encode_DxDxf32_to_DxDxf32[%0, %1, %0, %1](%5[%c0 to %3 for %3], %0, %1, %0, %1) : (!stream.resource<*>{%3}, index, index, index, index) -> !stream.resource<*>{%14} | |
%16 = arith.ceildivsi %7, %c4 : index | |
%17 = arith.muli %16, %c4 : index | |
%18 = arith.muli %8, %17 : index | |
%19 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_encoding_1::@_encoding_1_encode_DxDxf32_to_DxDxf32[%6, %7, %6, %7](%11[%c0 to %9 for %9], %6, %7, %6, %7) : (!stream.resource<*>{%9}, index, index, index, index) -> !stream.resource<*>{%18} | |
%20 = arith.muli %2, %7 : index | |
%21 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %6, %7](%15[%c0 to %14 for %14], %19[%c0 to %18 for %18], %0, %1, %6, %7) : (!stream.resource<*>{%14}, !stream.resource<*>{%18}, index, index, index, index) -> !stream.resource<*>{%20} | |
%22 = stream.async.transfer %21 : !stream.resource<*>{%20} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%20} | |
%23 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %22 : tensor<?x?xf32>{%0, %7} in !stream.resource<external>{%20} -> !hal.buffer_view | |
util.return %23 : !hal.buffer_view | |
} | |
// -----// IR Dump After FoldGlobalsPass (iree-util-fold-globals) //----- // | |
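// From this pass onward the IR is dumped at module scope, so the encoding/layout
// attributes, the @__device_0 global, and the three stream.executables
// (@foo_dispatch_0, @_encoding_0, @_encoding_1) are printed again; the body of
// @foo itself is unchanged. The host-side size arithmetic is consistent with the
// CPU encoding layouts: #encoding tiles the LHS rows by 8
// (innerTileSizes = [8, 1]), giving an LHS resource size of
// ceildivsi(%0, 8) * 32 * %1 bytes (8 rows x 4 bytes = 32), while #encoding1
// tiles the RHS columns by 4, giving (%6 * 4) * (ceildivsi(%7, 4) * 4) bytes.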
#encoding = #iree_encoding.layout<[#iree_cpu.cpu_encoding_layout<configuration = {encoding_info = {innerDimsPos = [0, 1], innerTileSizes = [8, 1], outerDimsPerm = [0, 1]}}>]> | |
#encoding1 = #iree_encoding.layout<[#iree_cpu.cpu_encoding_layout<configuration = {encoding_info = {innerDimsPos = [1, 0], innerTileSizes = [4, 1], outerDimsPerm = [1, 0]}}>]> | |
#executable_target_embedded_elf_x86_64 = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>, max_stack_allocation_size = 32768 : i64, native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#map3 = affine_map<(d0, d1) -> (d0, d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64]> : !hal.device | |
#encoding2 = #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding3 = #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding4 = #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
stream.executable private @foo_dispatch_0 { | |
stream.executable.export public @foo_dispatch_0_matmul_DxDxD_f32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @foo_dispatch_0_matmul_DxDxD_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: index, %arg5: index, %arg6: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg5, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} | |
%5 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = stream.binding.subspan %arg6[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
%7 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} -> tensor<?x?xf32, #encoding2> | |
%8 = iree_tensor_ext.dispatch.tensor.load %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} -> tensor<?x?xf32, #encoding3> | |
%9 = tensor.empty(%0, %3) : tensor<?x?xf32> | |
%10 = tensor.empty(%0, %3) : tensor<?x?xf32, #encoding4> | |
%11 = linalg.fill ins(%cst : f32) outs(%10 : tensor<?x?xf32, #encoding4>) -> tensor<?x?xf32, #encoding4> | |
%12 = linalg.matmul ins(%7, %8 : tensor<?x?xf32, #encoding2>, tensor<?x?xf32, #encoding3>) outs(%11 : tensor<?x?xf32, #encoding4>) -> tensor<?x?xf32, #encoding4> | |
%13 = iree_encoding.unset_encoding %12 : tensor<?x?xf32, #encoding4> -> tensor<?x?xf32>{%0, %3} | |
%14 = linalg.generic {indexing_maps = [#map3, #map3], iterator_types = ["parallel", "parallel"]} ins(%13 : tensor<?x?xf32>) outs(%9 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%15 = arith.addf %in, %in : f32 | |
linalg.yield %15 : f32 | |
} -> tensor<?x?xf32> | |
iree_tensor_ext.dispatch.tensor.store %14, %6, offsets = [0, 0], sizes = [%0, %3], strides = [1, 1] : tensor<?x?xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
return | |
} | |
} | |
} | |
stream.executable private @_encoding_0 { | |
stream.executable.export public @_encoding_0_encode_DxDxf32_to_DxDxf32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @_encoding_0_encode_DxDxf32_to_DxDxf32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} | |
%5 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding>>{%2, %3} | |
%6 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} -> tensor<?x?xf32> | |
%7 = iree_encoding.set_encoding %6 : tensor<?x?xf32> -> tensor<?x?xf32, #encoding> | |
iree_tensor_ext.dispatch.tensor.store %7, %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : tensor<?x?xf32, #encoding> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding>>{%2, %3} | |
return | |
} | |
} | |
} | |
stream.executable private @_encoding_1 { | |
stream.executable.export public @_encoding_1_encode_DxDxf32_to_DxDxf32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @_encoding_1_encode_DxDxf32_to_DxDxf32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} | |
%5 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} -> tensor<?x?xf32> | |
%7 = iree_encoding.set_encoding %6 : tensor<?x?xf32> -> tensor<?x?xf32, #encoding1> | |
iree_tensor_ext.dispatch.tensor.store %7, %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : tensor<?x?xf32, #encoding1> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
return | |
} | |
} | |
} | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%c32 = arith.constant 32 : index | |
%c0 = arith.constant 0 : index | |
%c8 = arith.constant 8 : index | |
%c4 = arith.constant 4 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c4 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3} | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%7 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%6, %7]) type(%element_type_f32) encoding(%dense_row_major) | |
%8 = arith.muli %6, %c4 : index | |
%9 = arith.muli %8, %7 : index | |
%10 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%6, %7} in !stream.resource<external>{%9} | |
%11 = stream.async.transfer %10 : !stream.resource<external>{%9} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%9} | |
%12 = arith.ceildivsi %0, %c8 : index | |
%13 = arith.muli %12, %c32 : index | |
%14 = arith.muli %13, %1 : index | |
%15 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_encoding_0::@_encoding_0_encode_DxDxf32_to_DxDxf32[%0, %1, %0, %1](%5[%c0 to %3 for %3], %0, %1, %0, %1) : (!stream.resource<*>{%3}, index, index, index, index) -> !stream.resource<*>{%14} | |
%16 = arith.ceildivsi %7, %c4 : index | |
%17 = arith.muli %16, %c4 : index | |
%18 = arith.muli %8, %17 : index | |
%19 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_encoding_1::@_encoding_1_encode_DxDxf32_to_DxDxf32[%6, %7, %6, %7](%11[%c0 to %9 for %9], %6, %7, %6, %7) : (!stream.resource<*>{%9}, index, index, index, index) -> !stream.resource<*>{%18} | |
%20 = arith.muli %2, %7 : index | |
%21 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %6, %7](%15[%c0 to %14 for %14], %19[%c0 to %18 for %18], %0, %1, %6, %7) : (!stream.resource<*>{%14}, !stream.resource<*>{%18}, index, index, index, index) -> !stream.resource<*>{%20} | |
%22 = stream.async.transfer %21 : !stream.resource<*>{%20} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%20} | |
%23 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %22 : tensor<?x?xf32>{%0, %7} in !stream.resource<external>{%20} -> !hal.buffer_view | |
util.return %23 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After FuseGlobalsPass (iree-util-fuse-globals) //----- // | |
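// Identical to the FoldGlobalsPass dump: @__device_0 is the only global in the
// module, so there is nothing to fuse.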
#encoding = #iree_encoding.layout<[#iree_cpu.cpu_encoding_layout<configuration = {encoding_info = {innerDimsPos = [0, 1], innerTileSizes = [8, 1], outerDimsPerm = [0, 1]}}>]> | |
#encoding1 = #iree_encoding.layout<[#iree_cpu.cpu_encoding_layout<configuration = {encoding_info = {innerDimsPos = [1, 0], innerTileSizes = [4, 1], outerDimsPerm = [1, 0]}}>]> | |
#executable_target_embedded_elf_x86_64 = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>, max_stack_allocation_size = 32768 : i64, native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#map3 = affine_map<(d0, d1) -> (d0, d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64]> : !hal.device | |
#encoding2 = #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding3 = #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding4 = #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
stream.executable private @foo_dispatch_0 { | |
stream.executable.export public @foo_dispatch_0_matmul_DxDxD_f32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @foo_dispatch_0_matmul_DxDxD_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: index, %arg5: index, %arg6: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg5, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} | |
%5 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = stream.binding.subspan %arg6[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
%7 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} -> tensor<?x?xf32, #encoding2> | |
%8 = iree_tensor_ext.dispatch.tensor.load %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} -> tensor<?x?xf32, #encoding3> | |
%9 = tensor.empty(%0, %3) : tensor<?x?xf32> | |
%10 = tensor.empty(%0, %3) : tensor<?x?xf32, #encoding4> | |
%11 = linalg.fill ins(%cst : f32) outs(%10 : tensor<?x?xf32, #encoding4>) -> tensor<?x?xf32, #encoding4> | |
%12 = linalg.matmul ins(%7, %8 : tensor<?x?xf32, #encoding2>, tensor<?x?xf32, #encoding3>) outs(%11 : tensor<?x?xf32, #encoding4>) -> tensor<?x?xf32, #encoding4> | |
%13 = iree_encoding.unset_encoding %12 : tensor<?x?xf32, #encoding4> -> tensor<?x?xf32>{%0, %3} | |
%14 = linalg.generic {indexing_maps = [#map3, #map3], iterator_types = ["parallel", "parallel"]} ins(%13 : tensor<?x?xf32>) outs(%9 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%15 = arith.addf %in, %in : f32 | |
linalg.yield %15 : f32 | |
} -> tensor<?x?xf32> | |
iree_tensor_ext.dispatch.tensor.store %14, %6, offsets = [0, 0], sizes = [%0, %3], strides = [1, 1] : tensor<?x?xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
return | |
} | |
} | |
} | |
stream.executable private @_encoding_0 { | |
stream.executable.export public @_encoding_0_encode_DxDxf32_to_DxDxf32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @_encoding_0_encode_DxDxf32_to_DxDxf32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} | |
%5 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding>>{%2, %3} | |
%6 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} -> tensor<?x?xf32> | |
%7 = iree_encoding.set_encoding %6 : tensor<?x?xf32> -> tensor<?x?xf32, #encoding> | |
iree_tensor_ext.dispatch.tensor.store %7, %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : tensor<?x?xf32, #encoding> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding>>{%2, %3} | |
return | |
} | |
} | |
} | |
stream.executable private @_encoding_1 { | |
stream.executable.export public @_encoding_1_encode_DxDxf32_to_DxDxf32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @_encoding_1_encode_DxDxf32_to_DxDxf32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} | |
%5 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} -> tensor<?x?xf32> | |
%7 = iree_encoding.set_encoding %6 : tensor<?x?xf32> -> tensor<?x?xf32, #encoding1> | |
iree_tensor_ext.dispatch.tensor.store %7, %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : tensor<?x?xf32, #encoding1> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
return | |
} | |
} | |
} | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%c32 = arith.constant 32 : index | |
%c0 = arith.constant 0 : index | |
%c8 = arith.constant 8 : index | |
%c4 = arith.constant 4 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c4 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3} | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%7 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%6, %7]) type(%element_type_f32) encoding(%dense_row_major) | |
%8 = arith.muli %6, %c4 : index | |
%9 = arith.muli %8, %7 : index | |
%10 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%6, %7} in !stream.resource<external>{%9} | |
%11 = stream.async.transfer %10 : !stream.resource<external>{%9} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%9} | |
%12 = arith.ceildivsi %0, %c8 : index | |
%13 = arith.muli %12, %c32 : index | |
%14 = arith.muli %13, %1 : index | |
%15 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_encoding_0::@_encoding_0_encode_DxDxf32_to_DxDxf32[%0, %1, %0, %1](%5[%c0 to %3 for %3], %0, %1, %0, %1) : (!stream.resource<*>{%3}, index, index, index, index) -> !stream.resource<*>{%14} | |
%16 = arith.ceildivsi %7, %c4 : index | |
%17 = arith.muli %16, %c4 : index | |
%18 = arith.muli %8, %17 : index | |
%19 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_encoding_1::@_encoding_1_encode_DxDxf32_to_DxDxf32[%6, %7, %6, %7](%11[%c0 to %9 for %9], %6, %7, %6, %7) : (!stream.resource<*>{%9}, index, index, index, index) -> !stream.resource<*>{%18} | |
%20 = arith.muli %2, %7 : index | |
%21 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %6, %7](%15[%c0 to %14 for %14], %19[%c0 to %18 for %18], %0, %1, %6, %7) : (!stream.resource<*>{%14}, !stream.resource<*>{%18}, index, index, index, index) -> !stream.resource<*>{%20} | |
%22 = stream.async.transfer %21 : !stream.resource<*>{%20} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%20} | |
%23 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %22 : tensor<?x?xf32>{%0, %7} in !stream.resource<external>{%20} -> !hal.buffer_view | |
util.return %23 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After IPOPass (iree-util-ipo) //----- // | |
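// No interprocedural changes: @foo is the only util.func in the module and the
// dispatch bodies live in separate stream.executables, so IPO leaves the module
// identical to the previous dump.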
#encoding = #iree_encoding.layout<[#iree_cpu.cpu_encoding_layout<configuration = {encoding_info = {innerDimsPos = [0, 1], innerTileSizes = [8, 1], outerDimsPerm = [0, 1]}}>]> | |
#encoding1 = #iree_encoding.layout<[#iree_cpu.cpu_encoding_layout<configuration = {encoding_info = {innerDimsPos = [1, 0], innerTileSizes = [4, 1], outerDimsPerm = [1, 0]}}>]> | |
#executable_target_embedded_elf_x86_64 = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>, max_stack_allocation_size = 32768 : i64, native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#map3 = affine_map<(d0, d1) -> (d0, d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64]> : !hal.device | |
#encoding2 = #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding3 = #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding4 = #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
stream.executable private @foo_dispatch_0 { | |
stream.executable.export public @foo_dispatch_0_matmul_DxDxD_f32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @foo_dispatch_0_matmul_DxDxD_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: index, %arg5: index, %arg6: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg5, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} | |
%5 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = stream.binding.subspan %arg6[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
%7 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} -> tensor<?x?xf32, #encoding2> | |
%8 = iree_tensor_ext.dispatch.tensor.load %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} -> tensor<?x?xf32, #encoding3> | |
%9 = tensor.empty(%0, %3) : tensor<?x?xf32> | |
%10 = tensor.empty(%0, %3) : tensor<?x?xf32, #encoding4> | |
%11 = linalg.fill ins(%cst : f32) outs(%10 : tensor<?x?xf32, #encoding4>) -> tensor<?x?xf32, #encoding4> | |
%12 = linalg.matmul ins(%7, %8 : tensor<?x?xf32, #encoding2>, tensor<?x?xf32, #encoding3>) outs(%11 : tensor<?x?xf32, #encoding4>) -> tensor<?x?xf32, #encoding4> | |
%13 = iree_encoding.unset_encoding %12 : tensor<?x?xf32, #encoding4> -> tensor<?x?xf32>{%0, %3} | |
%14 = linalg.generic {indexing_maps = [#map3, #map3], iterator_types = ["parallel", "parallel"]} ins(%13 : tensor<?x?xf32>) outs(%9 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%15 = arith.addf %in, %in : f32 | |
linalg.yield %15 : f32 | |
} -> tensor<?x?xf32> | |
iree_tensor_ext.dispatch.tensor.store %14, %6, offsets = [0, 0], sizes = [%0, %3], strides = [1, 1] : tensor<?x?xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
return | |
} | |
} | |
} | |
stream.executable private @_encoding_0 { | |
stream.executable.export public @_encoding_0_encode_DxDxf32_to_DxDxf32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @_encoding_0_encode_DxDxf32_to_DxDxf32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} | |
%5 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding>>{%2, %3} | |
%6 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} -> tensor<?x?xf32> | |
%7 = iree_encoding.set_encoding %6 : tensor<?x?xf32> -> tensor<?x?xf32, #encoding> | |
iree_tensor_ext.dispatch.tensor.store %7, %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : tensor<?x?xf32, #encoding> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding>>{%2, %3} | |
return | |
} | |
} | |
} | |
stream.executable private @_encoding_1 { | |
stream.executable.export public @_encoding_1_encode_DxDxf32_to_DxDxf32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @_encoding_1_encode_DxDxf32_to_DxDxf32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} | |
%5 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} -> tensor<?x?xf32> | |
%7 = iree_encoding.set_encoding %6 : tensor<?x?xf32> -> tensor<?x?xf32, #encoding1> | |
iree_tensor_ext.dispatch.tensor.store %7, %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : tensor<?x?xf32, #encoding1> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
return | |
} | |
} | |
} | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%c32 = arith.constant 32 : index | |
%c0 = arith.constant 0 : index | |
%c8 = arith.constant 8 : index | |
%c4 = arith.constant 4 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c4 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3} | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%7 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%6, %7]) type(%element_type_f32) encoding(%dense_row_major) | |
%8 = arith.muli %6, %c4 : index | |
%9 = arith.muli %8, %7 : index | |
%10 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%6, %7} in !stream.resource<external>{%9} | |
%11 = stream.async.transfer %10 : !stream.resource<external>{%9} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%9} | |
%12 = arith.ceildivsi %0, %c8 : index | |
%13 = arith.muli %12, %c32 : index | |
%14 = arith.muli %13, %1 : index | |
%15 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_encoding_0::@_encoding_0_encode_DxDxf32_to_DxDxf32[%0, %1, %0, %1](%5[%c0 to %3 for %3], %0, %1, %0, %1) : (!stream.resource<*>{%3}, index, index, index, index) -> !stream.resource<*>{%14} | |
%16 = arith.ceildivsi %7, %c4 : index | |
%17 = arith.muli %16, %c4 : index | |
%18 = arith.muli %8, %17 : index | |
%19 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_encoding_1::@_encoding_1_encode_DxDxf32_to_DxDxf32[%6, %7, %6, %7](%11[%c0 to %9 for %9], %6, %7, %6, %7) : (!stream.resource<*>{%9}, index, index, index, index) -> !stream.resource<*>{%18} | |
%20 = arith.muli %2, %7 : index | |
%21 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %6, %7](%15[%c0 to %14 for %14], %19[%c0 to %18 for %18], %0, %1, %6, %7) : (!stream.resource<*>{%14}, !stream.resource<*>{%18}, index, index, index, index) -> !stream.resource<*>{%20} | |
%22 = stream.async.transfer %21 : !stream.resource<*>{%20} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%20} | |
%23 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %22 : tensor<?x?xf32>{%0, %7} in !stream.resource<external>{%20} -> !hal.buffer_view | |
util.return %23 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After VerifyLoweringToAsyncResourcesPass (iree-stream-verify-lowering-to-async-resources) //----- // | |
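// The module appears unchanged from the previous dump, consistent with this
// being a verification step over the async resource lowering rather than a
// transformation.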
#encoding = #iree_encoding.layout<[#iree_cpu.cpu_encoding_layout<configuration = {encoding_info = {innerDimsPos = [0, 1], innerTileSizes = [8, 1], outerDimsPerm = [0, 1]}}>]> | |
#encoding1 = #iree_encoding.layout<[#iree_cpu.cpu_encoding_layout<configuration = {encoding_info = {innerDimsPos = [1, 0], innerTileSizes = [4, 1], outerDimsPerm = [1, 0]}}>]> | |
#executable_target_embedded_elf_x86_64 = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>, max_stack_allocation_size = 32768 : i64, native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#map3 = affine_map<(d0, d1) -> (d0, d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64]> : !hal.device | |
#encoding2 = #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding3 = #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding4 = #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
stream.executable private @foo_dispatch_0 { | |
stream.executable.export public @foo_dispatch_0_matmul_DxDxD_f32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @foo_dispatch_0_matmul_DxDxD_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: index, %arg5: index, %arg6: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg5, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} | |
%5 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = stream.binding.subspan %arg6[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
%7 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} -> tensor<?x?xf32, #encoding2> | |
%8 = iree_tensor_ext.dispatch.tensor.load %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} -> tensor<?x?xf32, #encoding3> | |
%9 = tensor.empty(%0, %3) : tensor<?x?xf32> | |
%10 = tensor.empty(%0, %3) : tensor<?x?xf32, #encoding4> | |
%11 = linalg.fill ins(%cst : f32) outs(%10 : tensor<?x?xf32, #encoding4>) -> tensor<?x?xf32, #encoding4> | |
%12 = linalg.matmul ins(%7, %8 : tensor<?x?xf32, #encoding2>, tensor<?x?xf32, #encoding3>) outs(%11 : tensor<?x?xf32, #encoding4>) -> tensor<?x?xf32, #encoding4> | |
%13 = iree_encoding.unset_encoding %12 : tensor<?x?xf32, #encoding4> -> tensor<?x?xf32>{%0, %3} | |
%14 = linalg.generic {indexing_maps = [#map3, #map3], iterator_types = ["parallel", "parallel"]} ins(%13 : tensor<?x?xf32>) outs(%9 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%15 = arith.addf %in, %in : f32 | |
linalg.yield %15 : f32 | |
} -> tensor<?x?xf32> | |
iree_tensor_ext.dispatch.tensor.store %14, %6, offsets = [0, 0], sizes = [%0, %3], strides = [1, 1] : tensor<?x?xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
return | |
} | |
} | |
} | |
stream.executable private @_encoding_0 { | |
stream.executable.export public @_encoding_0_encode_DxDxf32_to_DxDxf32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @_encoding_0_encode_DxDxf32_to_DxDxf32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} | |
%5 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding>>{%2, %3} | |
%6 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} -> tensor<?x?xf32> | |
%7 = iree_encoding.set_encoding %6 : tensor<?x?xf32> -> tensor<?x?xf32, #encoding> | |
iree_tensor_ext.dispatch.tensor.store %7, %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : tensor<?x?xf32, #encoding> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding>>{%2, %3} | |
return | |
} | |
} | |
} | |
stream.executable private @_encoding_1 { | |
stream.executable.export public @_encoding_1_encode_DxDxf32_to_DxDxf32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @_encoding_1_encode_DxDxf32_to_DxDxf32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} | |
%5 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} -> tensor<?x?xf32> | |
%7 = iree_encoding.set_encoding %6 : tensor<?x?xf32> -> tensor<?x?xf32, #encoding1> | |
iree_tensor_ext.dispatch.tensor.store %7, %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : tensor<?x?xf32, #encoding1> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
return | |
} | |
} | |
} | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%c32 = arith.constant 32 : index | |
%c0 = arith.constant 0 : index | |
%c8 = arith.constant 8 : index | |
%c4 = arith.constant 4 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c4 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3} | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%7 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%6, %7]) type(%element_type_f32) encoding(%dense_row_major) | |
%8 = arith.muli %6, %c4 : index | |
%9 = arith.muli %8, %7 : index | |
%10 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%6, %7} in !stream.resource<external>{%9} | |
%11 = stream.async.transfer %10 : !stream.resource<external>{%9} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%9} | |
%12 = arith.ceildivsi %0, %c8 : index | |
%13 = arith.muli %12, %c32 : index | |
%14 = arith.muli %13, %1 : index | |
%15 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_encoding_0::@_encoding_0_encode_DxDxf32_to_DxDxf32[%0, %1, %0, %1](%5[%c0 to %3 for %3], %0, %1, %0, %1) : (!stream.resource<*>{%3}, index, index, index, index) -> !stream.resource<*>{%14} | |
%16 = arith.ceildivsi %7, %c4 : index | |
%17 = arith.muli %16, %c4 : index | |
%18 = arith.muli %8, %17 : index | |
%19 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_encoding_1::@_encoding_1_encode_DxDxf32_to_DxDxf32[%6, %7, %6, %7](%11[%c0 to %9 for %9], %6, %7, %6, %7) : (!stream.resource<*>{%9}, index, index, index, index) -> !stream.resource<*>{%18} | |
%20 = arith.muli %2, %7 : index | |
%21 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %6, %7](%15[%c0 to %14 for %14], %19[%c0 to %18 for %18], %0, %1, %6, %7) : (!stream.resource<*>{%14}, !stream.resource<*>{%18}, index, index, index, index) -> !stream.resource<*>{%20} | |
%22 = stream.async.transfer %21 : !stream.resource<*>{%20} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%20} | |
%23 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %22 : tensor<?x?xf32>{%0, %7} in !stream.resource<external>{%20} -> !hal.buffer_view | |
util.return %23 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After MaterializeCopyOnWritePass (iree-stream-materialize-copy-on-write) //----- // | |
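// NOTE: the @foo body below is identical to the preceding dump: no copy ops (e.g. stream.async.clone) are
// materialized here, presumably because every dispatch result is a freshly produced resource and nothing is
// updated in place.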
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%c32 = arith.constant 32 : index | |
%c0 = arith.constant 0 : index | |
%c8 = arith.constant 8 : index | |
%c4 = arith.constant 4 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c4 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3} | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%7 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%6, %7]) type(%element_type_f32) encoding(%dense_row_major) | |
%8 = arith.muli %6, %c4 : index | |
%9 = arith.muli %8, %7 : index | |
%10 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%6, %7} in !stream.resource<external>{%9} | |
%11 = stream.async.transfer %10 : !stream.resource<external>{%9} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%9} | |
%12 = arith.ceildivsi %0, %c8 : index | |
%13 = arith.muli %12, %c32 : index | |
%14 = arith.muli %13, %1 : index | |
%15 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_encoding_0::@_encoding_0_encode_DxDxf32_to_DxDxf32[%0, %1, %0, %1](%5[%c0 to %3 for %3], %0, %1, %0, %1) : (!stream.resource<*>{%3}, index, index, index, index) -> !stream.resource<*>{%14} | |
%16 = arith.ceildivsi %7, %c4 : index | |
%17 = arith.muli %16, %c4 : index | |
%18 = arith.muli %8, %17 : index | |
%19 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_encoding_1::@_encoding_1_encode_DxDxf32_to_DxDxf32[%6, %7, %6, %7](%11[%c0 to %9 for %9], %6, %7, %6, %7) : (!stream.resource<*>{%9}, index, index, index, index) -> !stream.resource<*>{%18} | |
%20 = arith.muli %2, %7 : index | |
%21 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %6, %7](%15[%c0 to %14 for %14], %19[%c0 to %18 for %18], %0, %1, %6, %7) : (!stream.resource<*>{%14}, !stream.resource<*>{%18}, index, index, index, index) -> !stream.resource<*>{%20} | |
%22 = stream.async.transfer %21 : !stream.resource<*>{%20} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%20} | |
%23 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %22 : tensor<?x?xf32>{%0, %7} in !stream.resource<external>{%20} -> !hal.buffer_view | |
util.return %23 : !hal.buffer_view | |
} | |
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%c32 = arith.constant 32 : index | |
%c0 = arith.constant 0 : index | |
%c8 = arith.constant 8 : index | |
%c4 = arith.constant 4 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c4 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3} | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%7 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%6, %7]) type(%element_type_f32) encoding(%dense_row_major) | |
%8 = arith.muli %6, %c4 : index | |
%9 = arith.muli %8, %7 : index | |
%10 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%6, %7} in !stream.resource<external>{%9} | |
%11 = stream.async.transfer %10 : !stream.resource<external>{%9} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%9} | |
%12 = arith.ceildivsi %0, %c8 : index | |
%13 = arith.muli %12, %c32 : index | |
%14 = arith.muli %13, %1 : index | |
%15 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_encoding_0::@_encoding_0_encode_DxDxf32_to_DxDxf32[%0, %1, %0, %1](%5[%c0 to %3 for %3], %0, %1, %0, %1) : (!stream.resource<*>{%3}, index, index, index, index) -> !stream.resource<*>{%14} | |
%16 = arith.ceildivsi %7, %c4 : index | |
%17 = arith.muli %16, %c4 : index | |
%18 = arith.muli %8, %17 : index | |
%19 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_encoding_1::@_encoding_1_encode_DxDxf32_to_DxDxf32[%6, %7, %6, %7](%11[%c0 to %9 for %9], %6, %7, %6, %7) : (!stream.resource<*>{%9}, index, index, index, index) -> !stream.resource<*>{%18} | |
%20 = arith.muli %2, %7 : index | |
%21 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %6, %7](%15[%c0 to %14 for %14], %19[%c0 to %18 for %18], %0, %1, %6, %7) : (!stream.resource<*>{%14}, !stream.resource<*>{%18}, index, index, index, index) -> !stream.resource<*>{%20} | |
%22 = stream.async.transfer %21 : !stream.resource<*>{%20} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%20} | |
%23 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %22 : tensor<?x?xf32>{%0, %7} in !stream.resource<external>{%20} -> !hal.buffer_view | |
util.return %23 : !hal.buffer_view | |
} | |
// -----// IR Dump After ElideAsyncCopiesPass (iree-stream-elide-async-copies) //----- // | |
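// NOTE: the same-affinity stream.async.transfer ops in @foo (%5, %11, %22 below) survive this pass; they only
// disappear once resource lifetimes are refined (compare the RefineUsagePass dump further down, where the
// imports feed the encoding dispatches directly).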
#encoding = #iree_encoding.layout<[#iree_cpu.cpu_encoding_layout<configuration = {encoding_info = {innerDimsPos = [0, 1], innerTileSizes = [8, 1], outerDimsPerm = [0, 1]}}>]> | |
#encoding1 = #iree_encoding.layout<[#iree_cpu.cpu_encoding_layout<configuration = {encoding_info = {innerDimsPos = [1, 0], innerTileSizes = [4, 1], outerDimsPerm = [1, 0]}}>]> | |
#executable_target_embedded_elf_x86_64 = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>, max_stack_allocation_size = 32768 : i64, native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#map3 = affine_map<(d0, d1) -> (d0, d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64]> : !hal.device | |
#encoding2 = #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding3 = #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding4 = #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
stream.executable private @foo_dispatch_0 { | |
stream.executable.export public @foo_dispatch_0_matmul_DxDxD_f32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @foo_dispatch_0_matmul_DxDxD_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: index, %arg5: index, %arg6: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg5, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} | |
%5 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = stream.binding.subspan %arg6[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
%7 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} -> tensor<?x?xf32, #encoding2> | |
%8 = iree_tensor_ext.dispatch.tensor.load %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} -> tensor<?x?xf32, #encoding3> | |
%9 = tensor.empty(%0, %3) : tensor<?x?xf32> | |
%10 = tensor.empty(%0, %3) : tensor<?x?xf32, #encoding4> | |
%11 = linalg.fill ins(%cst : f32) outs(%10 : tensor<?x?xf32, #encoding4>) -> tensor<?x?xf32, #encoding4> | |
%12 = linalg.matmul ins(%7, %8 : tensor<?x?xf32, #encoding2>, tensor<?x?xf32, #encoding3>) outs(%11 : tensor<?x?xf32, #encoding4>) -> tensor<?x?xf32, #encoding4> | |
%13 = iree_encoding.unset_encoding %12 : tensor<?x?xf32, #encoding4> -> tensor<?x?xf32>{%0, %3} | |
%14 = linalg.generic {indexing_maps = [#map3, #map3], iterator_types = ["parallel", "parallel"]} ins(%13 : tensor<?x?xf32>) outs(%9 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%15 = arith.addf %in, %in : f32 | |
linalg.yield %15 : f32 | |
} -> tensor<?x?xf32> | |
iree_tensor_ext.dispatch.tensor.store %14, %6, offsets = [0, 0], sizes = [%0, %3], strides = [1, 1] : tensor<?x?xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
return | |
} | |
} | |
} | |
stream.executable private @_encoding_0 { | |
stream.executable.export public @_encoding_0_encode_DxDxf32_to_DxDxf32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @_encoding_0_encode_DxDxf32_to_DxDxf32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} | |
%5 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding>>{%2, %3} | |
%6 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} -> tensor<?x?xf32> | |
%7 = iree_encoding.set_encoding %6 : tensor<?x?xf32> -> tensor<?x?xf32, #encoding> | |
iree_tensor_ext.dispatch.tensor.store %7, %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : tensor<?x?xf32, #encoding> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding>>{%2, %3} | |
return | |
} | |
} | |
} | |
stream.executable private @_encoding_1 { | |
stream.executable.export public @_encoding_1_encode_DxDxf32_to_DxDxf32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @_encoding_1_encode_DxDxf32_to_DxDxf32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} | |
%5 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} -> tensor<?x?xf32> | |
%7 = iree_encoding.set_encoding %6 : tensor<?x?xf32> -> tensor<?x?xf32, #encoding1> | |
iree_tensor_ext.dispatch.tensor.store %7, %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : tensor<?x?xf32, #encoding1> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
return | |
} | |
} | |
} | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%c32 = arith.constant 32 : index | |
%c0 = arith.constant 0 : index | |
%c8 = arith.constant 8 : index | |
%c4 = arith.constant 4 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c4 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3} | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%7 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%6, %7]) type(%element_type_f32) encoding(%dense_row_major) | |
%8 = arith.muli %6, %c4 : index | |
%9 = arith.muli %8, %7 : index | |
%10 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%6, %7} in !stream.resource<external>{%9} | |
%11 = stream.async.transfer %10 : !stream.resource<external>{%9} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%9} | |
%12 = arith.ceildivsi %0, %c8 : index | |
%13 = arith.muli %12, %c32 : index | |
%14 = arith.muli %13, %1 : index | |
%15 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_encoding_0::@_encoding_0_encode_DxDxf32_to_DxDxf32[%0, %1, %0, %1](%5[%c0 to %3 for %3], %0, %1, %0, %1) : (!stream.resource<*>{%3}, index, index, index, index) -> !stream.resource<*>{%14} | |
%16 = arith.ceildivsi %7, %c4 : index | |
%17 = arith.muli %16, %c4 : index | |
%18 = arith.muli %8, %17 : index | |
%19 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_encoding_1::@_encoding_1_encode_DxDxf32_to_DxDxf32[%6, %7, %6, %7](%11[%c0 to %9 for %9], %6, %7, %6, %7) : (!stream.resource<*>{%9}, index, index, index, index) -> !stream.resource<*>{%18} | |
%20 = arith.muli %2, %7 : index | |
%21 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %6, %7](%15[%c0 to %14 for %14], %19[%c0 to %18 for %18], %0, %1, %6, %7) : (!stream.resource<*>{%14}, !stream.resource<*>{%18}, index, index, index, index) -> !stream.resource<*>{%20} | |
%22 = stream.async.transfer %21 : !stream.resource<*>{%20} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%20} | |
%23 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %22 : tensor<?x?xf32>{%0, %7} in !stream.resource<external>{%20} -> !hal.buffer_view | |
util.return %23 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%c32 = arith.constant 32 : index | |
%c0 = arith.constant 0 : index | |
%c8 = arith.constant 8 : index | |
%c4 = arith.constant 4 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c4 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3} | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%7 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%6, %7]) type(%element_type_f32) encoding(%dense_row_major) | |
%8 = arith.muli %6, %c4 : index | |
%9 = arith.muli %8, %7 : index | |
%10 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%6, %7} in !stream.resource<external>{%9} | |
%11 = stream.async.transfer %10 : !stream.resource<external>{%9} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%9} | |
%12 = arith.ceildivsi %0, %c8 : index | |
%13 = arith.muli %12, %c32 : index | |
%14 = arith.muli %13, %1 : index | |
%15 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_encoding_0::@_encoding_0_encode_DxDxf32_to_DxDxf32[%0, %1, %0, %1](%5[%c0 to %3 for %3], %0, %1, %0, %1) : (!stream.resource<*>{%3}, index, index, index, index) -> !stream.resource<*>{%14} | |
%16 = arith.ceildivsi %7, %c4 : index | |
%17 = arith.muli %16, %c4 : index | |
%18 = arith.muli %8, %17 : index | |
%19 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_encoding_1::@_encoding_1_encode_DxDxf32_to_DxDxf32[%6, %7, %6, %7](%11[%c0 to %9 for %9], %6, %7, %6, %7) : (!stream.resource<*>{%9}, index, index, index, index) -> !stream.resource<*>{%18} | |
%20 = arith.muli %2, %7 : index | |
%21 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %6, %7](%15[%c0 to %14 for %14], %19[%c0 to %18 for %18], %0, %1, %6, %7) : (!stream.resource<*>{%14}, !stream.resource<*>{%18}, index, index, index, index) -> !stream.resource<*>{%20} | |
%22 = stream.async.transfer %21 : !stream.resource<*>{%20} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%20} | |
%23 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %22 : tensor<?x?xf32>{%0, %7} in !stream.resource<external>{%20} -> !hal.buffer_view | |
util.return %23 : !hal.buffer_view | |
} | |
// -----// IR Dump After EmplaceAllocationsPass (iree-stream-emplace-allocations) //----- // | |
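// NOTE: unchanged from the previous dump; there is no caller-provided output buffer to emplace results into,
// so (presumably) the pass has nothing to rewrite and the dispatches keep producing their own allocations.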
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%c32 = arith.constant 32 : index | |
%c0 = arith.constant 0 : index | |
%c8 = arith.constant 8 : index | |
%c4 = arith.constant 4 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c4 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3} | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%7 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%6, %7]) type(%element_type_f32) encoding(%dense_row_major) | |
%8 = arith.muli %6, %c4 : index | |
%9 = arith.muli %8, %7 : index | |
%10 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%6, %7} in !stream.resource<external>{%9} | |
%11 = stream.async.transfer %10 : !stream.resource<external>{%9} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%9} | |
%12 = arith.ceildivsi %0, %c8 : index | |
%13 = arith.muli %12, %c32 : index | |
%14 = arith.muli %13, %1 : index | |
%15 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_encoding_0::@_encoding_0_encode_DxDxf32_to_DxDxf32[%0, %1, %0, %1](%5[%c0 to %3 for %3], %0, %1, %0, %1) : (!stream.resource<*>{%3}, index, index, index, index) -> !stream.resource<*>{%14} | |
%16 = arith.ceildivsi %7, %c4 : index | |
%17 = arith.muli %16, %c4 : index | |
%18 = arith.muli %8, %17 : index | |
%19 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_encoding_1::@_encoding_1_encode_DxDxf32_to_DxDxf32[%6, %7, %6, %7](%11[%c0 to %9 for %9], %6, %7, %6, %7) : (!stream.resource<*>{%9}, index, index, index, index) -> !stream.resource<*>{%18} | |
%20 = arith.muli %2, %7 : index | |
%21 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %6, %7](%15[%c0 to %14 for %14], %19[%c0 to %18 for %18], %0, %1, %6, %7) : (!stream.resource<*>{%14}, !stream.resource<*>{%18}, index, index, index, index) -> !stream.resource<*>{%20} | |
%22 = stream.async.transfer %21 : !stream.resource<*>{%20} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%20} | |
%23 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %22 : tensor<?x?xf32>{%0, %7} in !stream.resource<external>{%20} -> !hal.buffer_view | |
util.return %23 : !hal.buffer_view | |
} | |
// -----// IR Dump After RefineUsagePass (iree-stream-refine-usage) //----- // | |
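// NOTE: this is where the placeholder !stream.resource<*> values get concrete lifetimes: imported/exported
// buffers stay !stream.resource<external>, the encoded matmul operands become !stream.resource<transient>, and
// the redundant same-affinity stream.async.transfer ops are dropped. Sketch from @foo below (abbreviated):
//   before: %15 = stream.async.dispatch ... @_encoding_0::...(%5[...]) : (!stream.resource<*>{%3}, ...) -> !stream.resource<*>{%14}
//   after:  %13 = stream.async.dispatch ... @_encoding_0::...(%4[...]) : (!stream.resource<external>{%3}, ...) -> !stream.resource<transient>{%12}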
#encoding = #iree_encoding.layout<[#iree_cpu.cpu_encoding_layout<configuration = {encoding_info = {innerDimsPos = [0, 1], innerTileSizes = [8, 1], outerDimsPerm = [0, 1]}}>]> | |
#encoding1 = #iree_encoding.layout<[#iree_cpu.cpu_encoding_layout<configuration = {encoding_info = {innerDimsPos = [1, 0], innerTileSizes = [4, 1], outerDimsPerm = [1, 0]}}>]> | |
#executable_target_embedded_elf_x86_64 = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>, max_stack_allocation_size = 32768 : i64, native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#map3 = affine_map<(d0, d1) -> (d0, d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64]> : !hal.device | |
#encoding2 = #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding3 = #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding4 = #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
stream.executable private @foo_dispatch_0 { | |
stream.executable.export public @foo_dispatch_0_matmul_DxDxD_f32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @foo_dispatch_0_matmul_DxDxD_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: index, %arg5: index, %arg6: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg5, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} | |
%5 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = stream.binding.subspan %arg6[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
%7 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} -> tensor<?x?xf32, #encoding2> | |
%8 = iree_tensor_ext.dispatch.tensor.load %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} -> tensor<?x?xf32, #encoding3> | |
%9 = tensor.empty(%0, %3) : tensor<?x?xf32> | |
%10 = tensor.empty(%0, %3) : tensor<?x?xf32, #encoding4> | |
%11 = linalg.fill ins(%cst : f32) outs(%10 : tensor<?x?xf32, #encoding4>) -> tensor<?x?xf32, #encoding4> | |
%12 = linalg.matmul ins(%7, %8 : tensor<?x?xf32, #encoding2>, tensor<?x?xf32, #encoding3>) outs(%11 : tensor<?x?xf32, #encoding4>) -> tensor<?x?xf32, #encoding4> | |
%13 = iree_encoding.unset_encoding %12 : tensor<?x?xf32, #encoding4> -> tensor<?x?xf32>{%0, %3} | |
%14 = linalg.generic {indexing_maps = [#map3, #map3], iterator_types = ["parallel", "parallel"]} ins(%13 : tensor<?x?xf32>) outs(%9 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%15 = arith.addf %in, %in : f32 | |
linalg.yield %15 : f32 | |
} -> tensor<?x?xf32> | |
iree_tensor_ext.dispatch.tensor.store %14, %6, offsets = [0, 0], sizes = [%0, %3], strides = [1, 1] : tensor<?x?xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
return | |
} | |
} | |
} | |
stream.executable private @_encoding_0 { | |
stream.executable.export public @_encoding_0_encode_DxDxf32_to_DxDxf32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @_encoding_0_encode_DxDxf32_to_DxDxf32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} | |
%5 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding>>{%2, %3} | |
%6 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} -> tensor<?x?xf32> | |
%7 = iree_encoding.set_encoding %6 : tensor<?x?xf32> -> tensor<?x?xf32, #encoding> | |
iree_tensor_ext.dispatch.tensor.store %7, %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : tensor<?x?xf32, #encoding> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding>>{%2, %3} | |
return | |
} | |
} | |
} | |
stream.executable private @_encoding_1 { | |
stream.executable.export public @_encoding_1_encode_DxDxf32_to_DxDxf32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @_encoding_1_encode_DxDxf32_to_DxDxf32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} | |
%5 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} -> tensor<?x?xf32> | |
%7 = iree_encoding.set_encoding %6 : tensor<?x?xf32> -> tensor<?x?xf32, #encoding1> | |
iree_tensor_ext.dispatch.tensor.store %7, %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : tensor<?x?xf32, #encoding1> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
return | |
} | |
} | |
} | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%c32 = arith.constant 32 : index | |
%c0 = arith.constant 0 : index | |
%c8 = arith.constant 8 : index | |
%c4 = arith.constant 4 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c4 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%5, %6]) type(%element_type_f32) encoding(%dense_row_major) | |
%7 = arith.muli %5, %c4 : index | |
%8 = arith.muli %7, %6 : index | |
%9 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%5, %6} in !stream.resource<external>{%8} | |
%10 = arith.ceildivsi %0, %c8 : index | |
%11 = arith.muli %10, %c32 : index | |
%12 = arith.muli %11, %1 : index | |
%13 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_encoding_0::@_encoding_0_encode_DxDxf32_to_DxDxf32[%0, %1, %0, %1](%4[%c0 to %3 for %3], %0, %1, %0, %1) : (!stream.resource<external>{%3}, index, index, index, index) -> !stream.resource<transient>{%12} | |
%14 = arith.ceildivsi %6, %c4 : index | |
%15 = arith.muli %14, %c4 : index | |
%16 = arith.muli %7, %15 : index | |
%17 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_encoding_1::@_encoding_1_encode_DxDxf32_to_DxDxf32[%5, %6, %5, %6](%9[%c0 to %8 for %8], %5, %6, %5, %6) : (!stream.resource<external>{%8}, index, index, index, index) -> !stream.resource<transient>{%16} | |
%18 = arith.muli %2, %6 : index | |
%19 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %5, %6](%13[%c0 to %12 for %12], %17[%c0 to %16 for %16], %0, %1, %5, %6) : (!stream.resource<transient>{%12}, !stream.resource<transient>{%16}, index, index, index, index) -> !stream.resource<external>{%18} | |
%20 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %19 : tensor<?x?xf32>{%0, %6} in !stream.resource<external>{%18} -> !hal.buffer_view | |
util.return %20 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
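// NOTE: this and the following cleanup dumps (CSE, OptimizeIntArithmetic, SimplifyGlobalAccesses, ApplyPatterns)
// print an identical @foo body; the refined function is already in canonical form at this point.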
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%c32 = arith.constant 32 : index | |
%c0 = arith.constant 0 : index | |
%c8 = arith.constant 8 : index | |
%c4 = arith.constant 4 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c4 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%5, %6]) type(%element_type_f32) encoding(%dense_row_major) | |
%7 = arith.muli %5, %c4 : index | |
%8 = arith.muli %7, %6 : index | |
%9 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%5, %6} in !stream.resource<external>{%8} | |
%10 = arith.ceildivsi %0, %c8 : index | |
%11 = arith.muli %10, %c32 : index | |
%12 = arith.muli %11, %1 : index | |
%13 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_encoding_0::@_encoding_0_encode_DxDxf32_to_DxDxf32[%0, %1, %0, %1](%4[%c0 to %3 for %3], %0, %1, %0, %1) : (!stream.resource<external>{%3}, index, index, index, index) -> !stream.resource<transient>{%12} | |
%14 = arith.ceildivsi %6, %c4 : index | |
%15 = arith.muli %14, %c4 : index | |
%16 = arith.muli %7, %15 : index | |
%17 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_encoding_1::@_encoding_1_encode_DxDxf32_to_DxDxf32[%5, %6, %5, %6](%9[%c0 to %8 for %8], %5, %6, %5, %6) : (!stream.resource<external>{%8}, index, index, index, index) -> !stream.resource<transient>{%16} | |
%18 = arith.muli %2, %6 : index | |
%19 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %5, %6](%13[%c0 to %12 for %12], %17[%c0 to %16 for %16], %0, %1, %5, %6) : (!stream.resource<transient>{%12}, !stream.resource<transient>{%16}, index, index, index, index) -> !stream.resource<external>{%18} | |
%20 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %19 : tensor<?x?xf32>{%0, %6} in !stream.resource<external>{%18} -> !hal.buffer_view | |
util.return %20 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%c32 = arith.constant 32 : index | |
%c0 = arith.constant 0 : index | |
%c8 = arith.constant 8 : index | |
%c4 = arith.constant 4 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c4 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%5, %6]) type(%element_type_f32) encoding(%dense_row_major) | |
%7 = arith.muli %5, %c4 : index | |
%8 = arith.muli %7, %6 : index | |
%9 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%5, %6} in !stream.resource<external>{%8} | |
%10 = arith.ceildivsi %0, %c8 : index | |
%11 = arith.muli %10, %c32 : index | |
%12 = arith.muli %11, %1 : index | |
%13 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_encoding_0::@_encoding_0_encode_DxDxf32_to_DxDxf32[%0, %1, %0, %1](%4[%c0 to %3 for %3], %0, %1, %0, %1) : (!stream.resource<external>{%3}, index, index, index, index) -> !stream.resource<transient>{%12} | |
%14 = arith.ceildivsi %6, %c4 : index | |
%15 = arith.muli %14, %c4 : index | |
%16 = arith.muli %7, %15 : index | |
%17 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_encoding_1::@_encoding_1_encode_DxDxf32_to_DxDxf32[%5, %6, %5, %6](%9[%c0 to %8 for %8], %5, %6, %5, %6) : (!stream.resource<external>{%8}, index, index, index, index) -> !stream.resource<transient>{%16} | |
%18 = arith.muli %2, %6 : index | |
%19 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %5, %6](%13[%c0 to %12 for %12], %17[%c0 to %16 for %16], %0, %1, %5, %6) : (!stream.resource<transient>{%12}, !stream.resource<transient>{%16}, index, index, index, index) -> !stream.resource<external>{%18} | |
%20 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %19 : tensor<?x?xf32>{%0, %6} in !stream.resource<external>{%18} -> !hal.buffer_view | |
util.return %20 : !hal.buffer_view | |
} | |
// -----// IR Dump After OptimizeIntArithmeticPass (iree-util-optimize-int-arithmetic) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%c32 = arith.constant 32 : index | |
%c0 = arith.constant 0 : index | |
%c8 = arith.constant 8 : index | |
%c4 = arith.constant 4 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c4 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%5, %6]) type(%element_type_f32) encoding(%dense_row_major) | |
%7 = arith.muli %5, %c4 : index | |
%8 = arith.muli %7, %6 : index | |
%9 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%5, %6} in !stream.resource<external>{%8} | |
%10 = arith.ceildivsi %0, %c8 : index | |
%11 = arith.muli %10, %c32 : index | |
%12 = arith.muli %11, %1 : index | |
%13 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_encoding_0::@_encoding_0_encode_DxDxf32_to_DxDxf32[%0, %1, %0, %1](%4[%c0 to %3 for %3], %0, %1, %0, %1) : (!stream.resource<external>{%3}, index, index, index, index) -> !stream.resource<transient>{%12} | |
%14 = arith.ceildivsi %6, %c4 : index | |
%15 = arith.muli %14, %c4 : index | |
%16 = arith.muli %7, %15 : index | |
%17 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_encoding_1::@_encoding_1_encode_DxDxf32_to_DxDxf32[%5, %6, %5, %6](%9[%c0 to %8 for %8], %5, %6, %5, %6) : (!stream.resource<external>{%8}, index, index, index, index) -> !stream.resource<transient>{%16} | |
%18 = arith.muli %2, %6 : index | |
%19 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %5, %6](%13[%c0 to %12 for %12], %17[%c0 to %16 for %16], %0, %1, %5, %6) : (!stream.resource<transient>{%12}, !stream.resource<transient>{%16}, index, index, index, index) -> !stream.resource<external>{%18} | |
%20 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %19 : tensor<?x?xf32>{%0, %6} in !stream.resource<external>{%18} -> !hal.buffer_view | |
util.return %20 : !hal.buffer_view | |
} | |
// -----// IR Dump After SimplifyGlobalAccessesPass (iree-util-simplify-global-accesses) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%c32 = arith.constant 32 : index | |
%c0 = arith.constant 0 : index | |
%c8 = arith.constant 8 : index | |
%c4 = arith.constant 4 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c4 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%5, %6]) type(%element_type_f32) encoding(%dense_row_major) | |
%7 = arith.muli %5, %c4 : index | |
%8 = arith.muli %7, %6 : index | |
%9 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%5, %6} in !stream.resource<external>{%8} | |
%10 = arith.ceildivsi %0, %c8 : index | |
%11 = arith.muli %10, %c32 : index | |
%12 = arith.muli %11, %1 : index | |
%13 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_encoding_0::@_encoding_0_encode_DxDxf32_to_DxDxf32[%0, %1, %0, %1](%4[%c0 to %3 for %3], %0, %1, %0, %1) : (!stream.resource<external>{%3}, index, index, index, index) -> !stream.resource<transient>{%12} | |
%14 = arith.ceildivsi %6, %c4 : index | |
%15 = arith.muli %14, %c4 : index | |
%16 = arith.muli %7, %15 : index | |
%17 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_encoding_1::@_encoding_1_encode_DxDxf32_to_DxDxf32[%5, %6, %5, %6](%9[%c0 to %8 for %8], %5, %6, %5, %6) : (!stream.resource<external>{%8}, index, index, index, index) -> !stream.resource<transient>{%16} | |
%18 = arith.muli %2, %6 : index | |
%19 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %5, %6](%13[%c0 to %12 for %12], %17[%c0 to %16 for %16], %0, %1, %5, %6) : (!stream.resource<transient>{%12}, !stream.resource<transient>{%16}, index, index, index, index) -> !stream.resource<external>{%18} | |
%20 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %19 : tensor<?x?xf32>{%0, %6} in !stream.resource<external>{%18} -> !hal.buffer_view | |
util.return %20 : !hal.buffer_view | |
} | |
// -----// IR Dump After ApplyPatternsPass (iree-util-apply-patterns) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%c32 = arith.constant 32 : index | |
%c0 = arith.constant 0 : index | |
%c8 = arith.constant 8 : index | |
%c4 = arith.constant 4 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c4 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%5, %6]) type(%element_type_f32) encoding(%dense_row_major) | |
%7 = arith.muli %5, %c4 : index | |
%8 = arith.muli %7, %6 : index | |
%9 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%5, %6} in !stream.resource<external>{%8} | |
%10 = arith.ceildivsi %0, %c8 : index | |
%11 = arith.muli %10, %c32 : index | |
%12 = arith.muli %11, %1 : index | |
%13 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_encoding_0::@_encoding_0_encode_DxDxf32_to_DxDxf32[%0, %1, %0, %1](%4[%c0 to %3 for %3], %0, %1, %0, %1) : (!stream.resource<external>{%3}, index, index, index, index) -> !stream.resource<transient>{%12} | |
%14 = arith.ceildivsi %6, %c4 : index | |
%15 = arith.muli %14, %c4 : index | |
%16 = arith.muli %7, %15 : index | |
%17 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_encoding_1::@_encoding_1_encode_DxDxf32_to_DxDxf32[%5, %6, %5, %6](%9[%c0 to %8 for %8], %5, %6, %5, %6) : (!stream.resource<external>{%8}, index, index, index, index) -> !stream.resource<transient>{%16} | |
%18 = arith.muli %2, %6 : index | |
%19 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %5, %6](%13[%c0 to %12 for %12], %17[%c0 to %16 for %16], %0, %1, %5, %6) : (!stream.resource<transient>{%12}, !stream.resource<transient>{%16}, index, index, index, index) -> !stream.resource<external>{%18} | |
%20 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %19 : tensor<?x?xf32>{%0, %6} in !stream.resource<external>{%18} -> !hal.buffer_view | |
util.return %20 : !hal.buffer_view | |
} | |
// -----// IR Dump After FoldGlobalsPass (iree-util-fold-globals) //----- // | |
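// NOTE: full-module dump again; in the portion shown, the single @__device_0 global is retained and the
// reprinted encodings and executables match the earlier dumps, so nothing appears to have been folded away.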
#encoding = #iree_encoding.layout<[#iree_cpu.cpu_encoding_layout<configuration = {encoding_info = {innerDimsPos = [0, 1], innerTileSizes = [8, 1], outerDimsPerm = [0, 1]}}>]> | |
#encoding1 = #iree_encoding.layout<[#iree_cpu.cpu_encoding_layout<configuration = {encoding_info = {innerDimsPos = [1, 0], innerTileSizes = [4, 1], outerDimsPerm = [1, 0]}}>]> | |
#executable_target_embedded_elf_x86_64 = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>, max_stack_allocation_size = 32768 : i64, native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#map3 = affine_map<(d0, d1) -> (d0, d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64]> : !hal.device | |
#encoding2 = #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding3 = #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding4 = #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
stream.executable private @foo_dispatch_0 { | |
stream.executable.export public @foo_dispatch_0_matmul_DxDxD_f32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @foo_dispatch_0_matmul_DxDxD_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: index, %arg5: index, %arg6: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg5, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} | |
%5 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = stream.binding.subspan %arg6[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
%7 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} -> tensor<?x?xf32, #encoding2> | |
%8 = iree_tensor_ext.dispatch.tensor.load %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} -> tensor<?x?xf32, #encoding3> | |
%9 = tensor.empty(%0, %3) : tensor<?x?xf32> | |
%10 = tensor.empty(%0, %3) : tensor<?x?xf32, #encoding4> | |
%11 = linalg.fill ins(%cst : f32) outs(%10 : tensor<?x?xf32, #encoding4>) -> tensor<?x?xf32, #encoding4> | |
%12 = linalg.matmul ins(%7, %8 : tensor<?x?xf32, #encoding2>, tensor<?x?xf32, #encoding3>) outs(%11 : tensor<?x?xf32, #encoding4>) -> tensor<?x?xf32, #encoding4> | |
%13 = iree_encoding.unset_encoding %12 : tensor<?x?xf32, #encoding4> -> tensor<?x?xf32>{%0, %3} | |
%14 = linalg.generic {indexing_maps = [#map3, #map3], iterator_types = ["parallel", "parallel"]} ins(%13 : tensor<?x?xf32>) outs(%9 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%15 = arith.addf %in, %in : f32 | |
linalg.yield %15 : f32 | |
} -> tensor<?x?xf32> | |
iree_tensor_ext.dispatch.tensor.store %14, %6, offsets = [0, 0], sizes = [%0, %3], strides = [1, 1] : tensor<?x?xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
return | |
} | |
} | |
} | |
stream.executable private @_encoding_0 { | |
stream.executable.export public @_encoding_0_encode_DxDxf32_to_DxDxf32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @_encoding_0_encode_DxDxf32_to_DxDxf32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} | |
%5 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding>>{%2, %3} | |
%6 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} -> tensor<?x?xf32> | |
%7 = iree_encoding.set_encoding %6 : tensor<?x?xf32> -> tensor<?x?xf32, #encoding> | |
iree_tensor_ext.dispatch.tensor.store %7, %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : tensor<?x?xf32, #encoding> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding>>{%2, %3} | |
return | |
} | |
} | |
} | |
stream.executable private @_encoding_1 { | |
stream.executable.export public @_encoding_1_encode_DxDxf32_to_DxDxf32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @_encoding_1_encode_DxDxf32_to_DxDxf32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} | |
%5 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} -> tensor<?x?xf32> | |
%7 = iree_encoding.set_encoding %6 : tensor<?x?xf32> -> tensor<?x?xf32, #encoding1> | |
iree_tensor_ext.dispatch.tensor.store %7, %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : tensor<?x?xf32, #encoding1> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
return | |
} | |
} | |
} | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%c32 = arith.constant 32 : index | |
%c0 = arith.constant 0 : index | |
%c8 = arith.constant 8 : index | |
%c4 = arith.constant 4 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c4 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%5, %6]) type(%element_type_f32) encoding(%dense_row_major) | |
%7 = arith.muli %5, %c4 : index | |
%8 = arith.muli %7, %6 : index | |
%9 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%5, %6} in !stream.resource<external>{%8} | |
%10 = arith.ceildivsi %0, %c8 : index | |
%11 = arith.muli %10, %c32 : index | |
%12 = arith.muli %11, %1 : index | |
%13 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_encoding_0::@_encoding_0_encode_DxDxf32_to_DxDxf32[%0, %1, %0, %1](%4[%c0 to %3 for %3], %0, %1, %0, %1) : (!stream.resource<external>{%3}, index, index, index, index) -> !stream.resource<transient>{%12} | |
%14 = arith.ceildivsi %6, %c4 : index | |
%15 = arith.muli %14, %c4 : index | |
%16 = arith.muli %7, %15 : index | |
%17 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_encoding_1::@_encoding_1_encode_DxDxf32_to_DxDxf32[%5, %6, %5, %6](%9[%c0 to %8 for %8], %5, %6, %5, %6) : (!stream.resource<external>{%8}, index, index, index, index) -> !stream.resource<transient>{%16} | |
%18 = arith.muli %2, %6 : index | |
%19 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %5, %6](%13[%c0 to %12 for %12], %17[%c0 to %16 for %16], %0, %1, %5, %6) : (!stream.resource<transient>{%12}, !stream.resource<transient>{%16}, index, index, index, index) -> !stream.resource<external>{%18} | |
%20 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %19 : tensor<?x?xf32>{%0, %6} in !stream.resource<external>{%18} -> !hal.buffer_view | |
util.return %20 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After FuseGlobalsPass (iree-util-fuse-globals) //----- // | |
#encoding = #iree_encoding.layout<[#iree_cpu.cpu_encoding_layout<configuration = {encoding_info = {innerDimsPos = [0, 1], innerTileSizes = [8, 1], outerDimsPerm = [0, 1]}}>]> | |
#encoding1 = #iree_encoding.layout<[#iree_cpu.cpu_encoding_layout<configuration = {encoding_info = {innerDimsPos = [1, 0], innerTileSizes = [4, 1], outerDimsPerm = [1, 0]}}>]> | |
#executable_target_embedded_elf_x86_64 = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>, max_stack_allocation_size = 32768 : i64, native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#map3 = affine_map<(d0, d1) -> (d0, d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64]> : !hal.device | |
#encoding2 = #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding3 = #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding4 = #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
stream.executable private @foo_dispatch_0 { | |
stream.executable.export public @foo_dispatch_0_matmul_DxDxD_f32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @foo_dispatch_0_matmul_DxDxD_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: index, %arg5: index, %arg6: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg5, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} | |
%5 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = stream.binding.subspan %arg6[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
%7 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} -> tensor<?x?xf32, #encoding2> | |
%8 = iree_tensor_ext.dispatch.tensor.load %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} -> tensor<?x?xf32, #encoding3> | |
%9 = tensor.empty(%0, %3) : tensor<?x?xf32> | |
%10 = tensor.empty(%0, %3) : tensor<?x?xf32, #encoding4> | |
%11 = linalg.fill ins(%cst : f32) outs(%10 : tensor<?x?xf32, #encoding4>) -> tensor<?x?xf32, #encoding4> | |
%12 = linalg.matmul ins(%7, %8 : tensor<?x?xf32, #encoding2>, tensor<?x?xf32, #encoding3>) outs(%11 : tensor<?x?xf32, #encoding4>) -> tensor<?x?xf32, #encoding4> | |
%13 = iree_encoding.unset_encoding %12 : tensor<?x?xf32, #encoding4> -> tensor<?x?xf32>{%0, %3} | |
%14 = linalg.generic {indexing_maps = [#map3, #map3], iterator_types = ["parallel", "parallel"]} ins(%13 : tensor<?x?xf32>) outs(%9 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%15 = arith.addf %in, %in : f32 | |
linalg.yield %15 : f32 | |
} -> tensor<?x?xf32> | |
iree_tensor_ext.dispatch.tensor.store %14, %6, offsets = [0, 0], sizes = [%0, %3], strides = [1, 1] : tensor<?x?xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
return | |
} | |
} | |
} | |
stream.executable private @_encoding_0 { | |
stream.executable.export public @_encoding_0_encode_DxDxf32_to_DxDxf32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @_encoding_0_encode_DxDxf32_to_DxDxf32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} | |
%5 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding>>{%2, %3} | |
%6 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} -> tensor<?x?xf32> | |
%7 = iree_encoding.set_encoding %6 : tensor<?x?xf32> -> tensor<?x?xf32, #encoding> | |
iree_tensor_ext.dispatch.tensor.store %7, %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : tensor<?x?xf32, #encoding> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding>>{%2, %3} | |
return | |
} | |
} | |
} | |
stream.executable private @_encoding_1 { | |
stream.executable.export public @_encoding_1_encode_DxDxf32_to_DxDxf32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @_encoding_1_encode_DxDxf32_to_DxDxf32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} | |
%5 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} -> tensor<?x?xf32> | |
%7 = iree_encoding.set_encoding %6 : tensor<?x?xf32> -> tensor<?x?xf32, #encoding1> | |
iree_tensor_ext.dispatch.tensor.store %7, %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : tensor<?x?xf32, #encoding1> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
return | |
} | |
} | |
} | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%c32 = arith.constant 32 : index | |
%c0 = arith.constant 0 : index | |
%c8 = arith.constant 8 : index | |
%c4 = arith.constant 4 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c4 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%5, %6]) type(%element_type_f32) encoding(%dense_row_major) | |
%7 = arith.muli %5, %c4 : index | |
%8 = arith.muli %7, %6 : index | |
%9 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%5, %6} in !stream.resource<external>{%8} | |
%10 = arith.ceildivsi %0, %c8 : index | |
%11 = arith.muli %10, %c32 : index | |
%12 = arith.muli %11, %1 : index | |
%13 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_encoding_0::@_encoding_0_encode_DxDxf32_to_DxDxf32[%0, %1, %0, %1](%4[%c0 to %3 for %3], %0, %1, %0, %1) : (!stream.resource<external>{%3}, index, index, index, index) -> !stream.resource<transient>{%12} | |
%14 = arith.ceildivsi %6, %c4 : index | |
%15 = arith.muli %14, %c4 : index | |
%16 = arith.muli %7, %15 : index | |
%17 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_encoding_1::@_encoding_1_encode_DxDxf32_to_DxDxf32[%5, %6, %5, %6](%9[%c0 to %8 for %8], %5, %6, %5, %6) : (!stream.resource<external>{%8}, index, index, index, index) -> !stream.resource<transient>{%16} | |
%18 = arith.muli %2, %6 : index | |
%19 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %5, %6](%13[%c0 to %12 for %12], %17[%c0 to %16 for %16], %0, %1, %5, %6) : (!stream.resource<transient>{%12}, !stream.resource<transient>{%16}, index, index, index, index) -> !stream.resource<external>{%18} | |
%20 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %19 : tensor<?x?xf32>{%0, %6} in !stream.resource<external>{%18} -> !hal.buffer_view | |
util.return %20 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After IPOPass (iree-util-ipo) //----- // | |
#encoding = #iree_encoding.layout<[#iree_cpu.cpu_encoding_layout<configuration = {encoding_info = {innerDimsPos = [0, 1], innerTileSizes = [8, 1], outerDimsPerm = [0, 1]}}>]> | |
#encoding1 = #iree_encoding.layout<[#iree_cpu.cpu_encoding_layout<configuration = {encoding_info = {innerDimsPos = [1, 0], innerTileSizes = [4, 1], outerDimsPerm = [1, 0]}}>]> | |
#executable_target_embedded_elf_x86_64 = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>, max_stack_allocation_size = 32768 : i64, native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#map3 = affine_map<(d0, d1) -> (d0, d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64]> : !hal.device | |
#encoding2 = #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding3 = #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding4 = #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
stream.executable private @foo_dispatch_0 { | |
stream.executable.export public @foo_dispatch_0_matmul_DxDxD_f32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @foo_dispatch_0_matmul_DxDxD_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: index, %arg5: index, %arg6: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg5, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} | |
%5 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = stream.binding.subspan %arg6[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
%7 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} -> tensor<?x?xf32, #encoding2> | |
%8 = iree_tensor_ext.dispatch.tensor.load %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} -> tensor<?x?xf32, #encoding3> | |
%9 = tensor.empty(%0, %3) : tensor<?x?xf32> | |
%10 = tensor.empty(%0, %3) : tensor<?x?xf32, #encoding4> | |
%11 = linalg.fill ins(%cst : f32) outs(%10 : tensor<?x?xf32, #encoding4>) -> tensor<?x?xf32, #encoding4> | |
%12 = linalg.matmul ins(%7, %8 : tensor<?x?xf32, #encoding2>, tensor<?x?xf32, #encoding3>) outs(%11 : tensor<?x?xf32, #encoding4>) -> tensor<?x?xf32, #encoding4> | |
%13 = iree_encoding.unset_encoding %12 : tensor<?x?xf32, #encoding4> -> tensor<?x?xf32>{%0, %3} | |
%14 = linalg.generic {indexing_maps = [#map3, #map3], iterator_types = ["parallel", "parallel"]} ins(%13 : tensor<?x?xf32>) outs(%9 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%15 = arith.addf %in, %in : f32 | |
linalg.yield %15 : f32 | |
} -> tensor<?x?xf32> | |
iree_tensor_ext.dispatch.tensor.store %14, %6, offsets = [0, 0], sizes = [%0, %3], strides = [1, 1] : tensor<?x?xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
return | |
} | |
} | |
} | |
stream.executable private @_encoding_0 { | |
stream.executable.export public @_encoding_0_encode_DxDxf32_to_DxDxf32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @_encoding_0_encode_DxDxf32_to_DxDxf32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} | |
%5 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding>>{%2, %3} | |
%6 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} -> tensor<?x?xf32> | |
%7 = iree_encoding.set_encoding %6 : tensor<?x?xf32> -> tensor<?x?xf32, #encoding> | |
iree_tensor_ext.dispatch.tensor.store %7, %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : tensor<?x?xf32, #encoding> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding>>{%2, %3} | |
return | |
} | |
} | |
} | |
stream.executable private @_encoding_1 { | |
stream.executable.export public @_encoding_1_encode_DxDxf32_to_DxDxf32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @_encoding_1_encode_DxDxf32_to_DxDxf32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} | |
%5 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} -> tensor<?x?xf32> | |
%7 = iree_encoding.set_encoding %6 : tensor<?x?xf32> -> tensor<?x?xf32, #encoding1> | |
iree_tensor_ext.dispatch.tensor.store %7, %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : tensor<?x?xf32, #encoding1> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
return | |
} | |
} | |
} | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%c32 = arith.constant 32 : index | |
%c0 = arith.constant 0 : index | |
%c8 = arith.constant 8 : index | |
%c4 = arith.constant 4 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c4 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%5, %6]) type(%element_type_f32) encoding(%dense_row_major) | |
%7 = arith.muli %5, %c4 : index | |
%8 = arith.muli %7, %6 : index | |
%9 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%5, %6} in !stream.resource<external>{%8} | |
%10 = arith.ceildivsi %0, %c8 : index | |
%11 = arith.muli %10, %c32 : index | |
%12 = arith.muli %11, %1 : index | |
%13 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_encoding_0::@_encoding_0_encode_DxDxf32_to_DxDxf32[%0, %1, %0, %1](%4[%c0 to %3 for %3], %0, %1, %0, %1) : (!stream.resource<external>{%3}, index, index, index, index) -> !stream.resource<transient>{%12} | |
%14 = arith.ceildivsi %6, %c4 : index | |
%15 = arith.muli %14, %c4 : index | |
%16 = arith.muli %7, %15 : index | |
%17 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_encoding_1::@_encoding_1_encode_DxDxf32_to_DxDxf32[%5, %6, %5, %6](%9[%c0 to %8 for %8], %5, %6, %5, %6) : (!stream.resource<external>{%8}, index, index, index, index) -> !stream.resource<transient>{%16} | |
%18 = arith.muli %2, %6 : index | |
%19 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %5, %6](%13[%c0 to %12 for %12], %17[%c0 to %16 for %16], %0, %1, %5, %6) : (!stream.resource<transient>{%12}, !stream.resource<transient>{%16}, index, index, index, index) -> !stream.resource<external>{%18} | |
%20 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %19 : tensor<?x?xf32>{%0, %6} in !stream.resource<external>{%18} -> !hal.buffer_view | |
util.return %20 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After VerifyAsyncAccessRangesPass (iree-stream-verify-async-access-ranges) //----- // | |
#encoding = #iree_encoding.layout<[#iree_cpu.cpu_encoding_layout<configuration = {encoding_info = {innerDimsPos = [0, 1], innerTileSizes = [8, 1], outerDimsPerm = [0, 1]}}>]> | |
#encoding1 = #iree_encoding.layout<[#iree_cpu.cpu_encoding_layout<configuration = {encoding_info = {innerDimsPos = [1, 0], innerTileSizes = [4, 1], outerDimsPerm = [1, 0]}}>]> | |
#executable_target_embedded_elf_x86_64 = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>, max_stack_allocation_size = 32768 : i64, native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#map3 = affine_map<(d0, d1) -> (d0, d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64]> : !hal.device | |
#encoding2 = #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding3 = #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding4 = #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
stream.executable private @foo_dispatch_0 { | |
stream.executable.export public @foo_dispatch_0_matmul_DxDxD_f32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @foo_dispatch_0_matmul_DxDxD_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: index, %arg5: index, %arg6: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg5, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} | |
%5 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = stream.binding.subspan %arg6[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
%7 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} -> tensor<?x?xf32, #encoding2> | |
%8 = iree_tensor_ext.dispatch.tensor.load %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} -> tensor<?x?xf32, #encoding3> | |
%9 = tensor.empty(%0, %3) : tensor<?x?xf32> | |
%10 = tensor.empty(%0, %3) : tensor<?x?xf32, #encoding4> | |
%11 = linalg.fill ins(%cst : f32) outs(%10 : tensor<?x?xf32, #encoding4>) -> tensor<?x?xf32, #encoding4> | |
%12 = linalg.matmul ins(%7, %8 : tensor<?x?xf32, #encoding2>, tensor<?x?xf32, #encoding3>) outs(%11 : tensor<?x?xf32, #encoding4>) -> tensor<?x?xf32, #encoding4> | |
%13 = iree_encoding.unset_encoding %12 : tensor<?x?xf32, #encoding4> -> tensor<?x?xf32>{%0, %3} | |
%14 = linalg.generic {indexing_maps = [#map3, #map3], iterator_types = ["parallel", "parallel"]} ins(%13 : tensor<?x?xf32>) outs(%9 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%15 = arith.addf %in, %in : f32 | |
linalg.yield %15 : f32 | |
} -> tensor<?x?xf32> | |
iree_tensor_ext.dispatch.tensor.store %14, %6, offsets = [0, 0], sizes = [%0, %3], strides = [1, 1] : tensor<?x?xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
return | |
} | |
} | |
} | |
stream.executable private @_encoding_0 { | |
stream.executable.export public @_encoding_0_encode_DxDxf32_to_DxDxf32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @_encoding_0_encode_DxDxf32_to_DxDxf32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} | |
%5 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding>>{%2, %3} | |
%6 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} -> tensor<?x?xf32> | |
%7 = iree_encoding.set_encoding %6 : tensor<?x?xf32> -> tensor<?x?xf32, #encoding> | |
iree_tensor_ext.dispatch.tensor.store %7, %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : tensor<?x?xf32, #encoding> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding>>{%2, %3} | |
return | |
} | |
} | |
} | |
stream.executable private @_encoding_1 { | |
stream.executable.export public @_encoding_1_encode_DxDxf32_to_DxDxf32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @_encoding_1_encode_DxDxf32_to_DxDxf32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} | |
%5 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} -> tensor<?x?xf32> | |
%7 = iree_encoding.set_encoding %6 : tensor<?x?xf32> -> tensor<?x?xf32, #encoding1> | |
iree_tensor_ext.dispatch.tensor.store %7, %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : tensor<?x?xf32, #encoding1> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
return | |
} | |
} | |
} | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%c32 = arith.constant 32 : index | |
%c0 = arith.constant 0 : index | |
%c8 = arith.constant 8 : index | |
%c4 = arith.constant 4 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c4 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%5, %6]) type(%element_type_f32) encoding(%dense_row_major) | |
%7 = arith.muli %5, %c4 : index | |
%8 = arith.muli %7, %6 : index | |
%9 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%5, %6} in !stream.resource<external>{%8} | |
%10 = arith.ceildivsi %0, %c8 : index | |
%11 = arith.muli %10, %c32 : index | |
%12 = arith.muli %11, %1 : index | |
%13 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_encoding_0::@_encoding_0_encode_DxDxf32_to_DxDxf32[%0, %1, %0, %1](%4[%c0 to %3 for %3], %0, %1, %0, %1) : (!stream.resource<external>{%3}, index, index, index, index) -> !stream.resource<transient>{%12} | |
%14 = arith.ceildivsi %6, %c4 : index | |
%15 = arith.muli %14, %c4 : index | |
%16 = arith.muli %7, %15 : index | |
%17 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_encoding_1::@_encoding_1_encode_DxDxf32_to_DxDxf32[%5, %6, %5, %6](%9[%c0 to %8 for %8], %5, %6, %5, %6) : (!stream.resource<external>{%8}, index, index, index, index) -> !stream.resource<transient>{%16} | |
%18 = arith.muli %2, %6 : index | |
%19 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %5, %6](%13[%c0 to %12 for %12], %17[%c0 to %16 for %16], %0, %1, %5, %6) : (!stream.resource<transient>{%12}, !stream.resource<transient>{%16}, index, index, index, index) -> !stream.resource<external>{%18} | |
%20 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %19 : tensor<?x?xf32>{%0, %6} in !stream.resource<external>{%18} -> !hal.buffer_view | |
util.return %20 : !hal.buffer_view | |
} | |
} | |
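// The preceding dumps (FoldGlobalsPass, FuseGlobalsPass, IPOPass, VerifyAsyncAccessRangesPass)
// show the module apparently unchanged: the only global is the constant @__device_0 and there
// are no internal util.func calls for these cleanup passes to fold, fuse, or specialize.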
// -----// IR Dump After ScheduleExecutionPass (iree-stream-schedule-execution) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%c32 = arith.constant 32 : index | |
%c0 = arith.constant 0 : index | |
%c8 = arith.constant 8 : index | |
%c4 = arith.constant 4 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c4 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%5, %6]) type(%element_type_f32) encoding(%dense_row_major) | |
%7 = arith.muli %5, %c4 : index | |
%8 = arith.muli %7, %6 : index | |
%9 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%5, %6} in !stream.resource<external>{%8} | |
%10 = arith.ceildivsi %0, %c8 : index | |
%11 = arith.muli %10, %c32 : index | |
%12 = arith.muli %11, %1 : index | |
%13 = arith.ceildivsi %6, %c4 : index | |
%14 = arith.muli %13, %c4 : index | |
%15 = arith.muli %7, %14 : index | |
%16 = arith.muli %2, %6 : index | |
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%4 as %arg2: !stream.resource<external>{%3}, %9 as %arg3: !stream.resource<external>{%8}) -> !stream.resource<external>{%16} { | |
%19 = stream.async.dispatch @_encoding_0::@_encoding_0_encode_DxDxf32_to_DxDxf32[%0, %1, %0, %1](%arg2[%c0 to %3 for %3], %0, %1, %0, %1) : (!stream.resource<external>{%3}, index, index, index, index) -> !stream.resource<transient>{%12} | |
%20 = stream.async.dispatch @_encoding_1::@_encoding_1_encode_DxDxf32_to_DxDxf32[%5, %6, %5, %6](%arg3[%c0 to %8 for %8], %5, %6, %5, %6) : (!stream.resource<external>{%8}, index, index, index, index) -> !stream.resource<transient>{%15} | |
%21 = stream.async.dispatch @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %5, %6](%19[%c0 to %12 for %12], %20[%c0 to %15 for %15], %0, %1, %5, %6) : (!stream.resource<transient>{%12}, !stream.resource<transient>{%15}, index, index, index, index) -> !stream.resource<external>{%16} | |
stream.yield %21 : !stream.resource<external>{%16} | |
} => !stream.timepoint | |
%17 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%16} | |
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<?x?xf32>{%0, %6} in !stream.resource<external>{%16} -> !hal.buffer_view | |
util.return %18 : !hal.buffer_view | |
} | |
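// ScheduleExecutionPass wraps the two encoding dispatches and the matmul dispatch in a single
// stream.async.execute region on #hal.device.affinity<@__device_0>; the region yields a
// !stream.timepoint that is awaited before the result is exported as a !hal.buffer_view.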
// -----// IR Dump After ScheduleConcurrencyPass (iree-stream-schedule-concurrency) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%c32 = arith.constant 32 : index | |
%c0 = arith.constant 0 : index | |
%c8 = arith.constant 8 : index | |
%c4 = arith.constant 4 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c4 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%5, %6]) type(%element_type_f32) encoding(%dense_row_major) | |
%7 = arith.muli %5, %c4 : index | |
%8 = arith.muli %7, %6 : index | |
%9 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%5, %6} in !stream.resource<external>{%8} | |
%10 = arith.ceildivsi %0, %c8 : index | |
%11 = arith.muli %10, %c32 : index | |
%12 = arith.muli %11, %1 : index | |
%13 = arith.ceildivsi %6, %c4 : index | |
%14 = arith.muli %13, %c4 : index | |
%15 = arith.muli %7, %14 : index | |
%16 = arith.muli %2, %6 : index | |
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%4 as %arg2: !stream.resource<external>{%3}, %9 as %arg3: !stream.resource<external>{%8}) -> !stream.resource<external>{%16} { | |
%19:2 = stream.async.concurrent with(%arg2 as %arg4: !stream.resource<external>{%3}, %arg3 as %arg5: !stream.resource<external>{%8}) -> (!stream.resource<transient>{%12}, !stream.resource<transient>{%15}) { | |
%21 = stream.async.dispatch @_encoding_0::@_encoding_0_encode_DxDxf32_to_DxDxf32[%0, %1, %0, %1](%arg4[%c0 to %3 for %3], %0, %1, %0, %1) : (!stream.resource<external>{%3}, index, index, index, index) -> !stream.resource<transient>{%12} | |
%22 = stream.async.dispatch @_encoding_1::@_encoding_1_encode_DxDxf32_to_DxDxf32[%5, %6, %5, %6](%arg5[%c0 to %8 for %8], %5, %6, %5, %6) : (!stream.resource<external>{%8}, index, index, index, index) -> !stream.resource<transient>{%15} | |
stream.yield %21, %22 : !stream.resource<transient>{%12}, !stream.resource<transient>{%15} | |
} | |
%20 = stream.async.dispatch @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %5, %6](%19#0[%c0 to %12 for %12], %19#1[%c0 to %15 for %15], %0, %1, %5, %6) : (!stream.resource<transient>{%12}, !stream.resource<transient>{%15}, index, index, index, index) -> !stream.resource<external>{%16} | |
stream.yield %20 : !stream.resource<external>{%16} | |
} => !stream.timepoint | |
%17 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%16} | |
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<?x?xf32>{%0, %6} in !stream.resource<external>{%16} -> !hal.buffer_view | |
util.return %18 : !hal.buffer_view | |
} | |
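// ScheduleConcurrencyPass nests the two encoding dispatches, which have no data dependence on
// each other, inside a stream.async.concurrent region; the matmul dispatch stays outside that
// region because it consumes both encoded results (%19#0 and %19#1).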
// -----// IR Dump After SyncInitializersPass (iree-stream-sync-initializers) //----- // | |
#encoding = #iree_encoding.layout<[#iree_cpu.cpu_encoding_layout<configuration = {encoding_info = {innerDimsPos = [0, 1], innerTileSizes = [8, 1], outerDimsPerm = [0, 1]}}>]> | |
#encoding1 = #iree_encoding.layout<[#iree_cpu.cpu_encoding_layout<configuration = {encoding_info = {innerDimsPos = [1, 0], innerTileSizes = [4, 1], outerDimsPerm = [1, 0]}}>]> | |
#executable_target_embedded_elf_x86_64 = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>, max_stack_allocation_size = 32768 : i64, native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#map3 = affine_map<(d0, d1) -> (d0, d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64]> : !hal.device | |
#encoding2 = #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding3 = #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding4 = #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
stream.executable private @foo_dispatch_0 { | |
stream.executable.export public @foo_dispatch_0_matmul_DxDxD_f32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @foo_dispatch_0_matmul_DxDxD_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: index, %arg5: index, %arg6: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg5, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} | |
%5 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = stream.binding.subspan %arg6[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
%7 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} -> tensor<?x?xf32, #encoding2> | |
%8 = iree_tensor_ext.dispatch.tensor.load %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} -> tensor<?x?xf32, #encoding3> | |
%9 = tensor.empty(%0, %3) : tensor<?x?xf32> | |
%10 = tensor.empty(%0, %3) : tensor<?x?xf32, #encoding4> | |
%11 = linalg.fill ins(%cst : f32) outs(%10 : tensor<?x?xf32, #encoding4>) -> tensor<?x?xf32, #encoding4> | |
%12 = linalg.matmul ins(%7, %8 : tensor<?x?xf32, #encoding2>, tensor<?x?xf32, #encoding3>) outs(%11 : tensor<?x?xf32, #encoding4>) -> tensor<?x?xf32, #encoding4> | |
%13 = iree_encoding.unset_encoding %12 : tensor<?x?xf32, #encoding4> -> tensor<?x?xf32>{%0, %3} | |
%14 = linalg.generic {indexing_maps = [#map3, #map3], iterator_types = ["parallel", "parallel"]} ins(%13 : tensor<?x?xf32>) outs(%9 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%15 = arith.addf %in, %in : f32 | |
linalg.yield %15 : f32 | |
} -> tensor<?x?xf32> | |
iree_tensor_ext.dispatch.tensor.store %14, %6, offsets = [0, 0], sizes = [%0, %3], strides = [1, 1] : tensor<?x?xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
return | |
} | |
} | |
} | |
stream.executable private @_encoding_0 { | |
stream.executable.export public @_encoding_0_encode_DxDxf32_to_DxDxf32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @_encoding_0_encode_DxDxf32_to_DxDxf32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} | |
%5 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding>>{%2, %3} | |
%6 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} -> tensor<?x?xf32> | |
%7 = iree_encoding.set_encoding %6 : tensor<?x?xf32> -> tensor<?x?xf32, #encoding> | |
iree_tensor_ext.dispatch.tensor.store %7, %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : tensor<?x?xf32, #encoding> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding>>{%2, %3} | |
return | |
} | |
} | |
} | |
stream.executable private @_encoding_1 { | |
stream.executable.export public @_encoding_1_encode_DxDxf32_to_DxDxf32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @_encoding_1_encode_DxDxf32_to_DxDxf32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} | |
%5 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} -> tensor<?x?xf32> | |
%7 = iree_encoding.set_encoding %6 : tensor<?x?xf32> -> tensor<?x?xf32, #encoding1> | |
iree_tensor_ext.dispatch.tensor.store %7, %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : tensor<?x?xf32, #encoding1> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
return | |
} | |
} | |
} | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%c32 = arith.constant 32 : index | |
%c0 = arith.constant 0 : index | |
%c8 = arith.constant 8 : index | |
%c4 = arith.constant 4 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c4 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%5, %6]) type(%element_type_f32) encoding(%dense_row_major) | |
%7 = arith.muli %5, %c4 : index | |
%8 = arith.muli %7, %6 : index | |
%9 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%5, %6} in !stream.resource<external>{%8} | |
%10 = arith.ceildivsi %0, %c8 : index | |
%11 = arith.muli %10, %c32 : index | |
%12 = arith.muli %11, %1 : index | |
%13 = arith.ceildivsi %6, %c4 : index | |
%14 = arith.muli %13, %c4 : index | |
%15 = arith.muli %7, %14 : index | |
%16 = arith.muli %2, %6 : index | |
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%4 as %arg2: !stream.resource<external>{%3}, %9 as %arg3: !stream.resource<external>{%8}) -> !stream.resource<external>{%16} { | |
%19:2 = stream.async.concurrent with(%arg2 as %arg4: !stream.resource<external>{%3}, %arg3 as %arg5: !stream.resource<external>{%8}) -> (!stream.resource<transient>{%12}, !stream.resource<transient>{%15}) { | |
%21 = stream.async.dispatch @_encoding_0::@_encoding_0_encode_DxDxf32_to_DxDxf32[%0, %1, %0, %1](%arg4[%c0 to %3 for %3], %0, %1, %0, %1) : (!stream.resource<external>{%3}, index, index, index, index) -> !stream.resource<transient>{%12} | |
%22 = stream.async.dispatch @_encoding_1::@_encoding_1_encode_DxDxf32_to_DxDxf32[%5, %6, %5, %6](%arg5[%c0 to %8 for %8], %5, %6, %5, %6) : (!stream.resource<external>{%8}, index, index, index, index) -> !stream.resource<transient>{%15} | |
stream.yield %21, %22 : !stream.resource<transient>{%12}, !stream.resource<transient>{%15} | |
} | |
%20 = stream.async.dispatch @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %5, %6](%19#0[%c0 to %12 for %12], %19#1[%c0 to %15 for %15], %0, %1, %5, %6) : (!stream.resource<transient>{%12}, !stream.resource<transient>{%15}, index, index, index, index) -> !stream.resource<external>{%16} | |
stream.yield %20 : !stream.resource<external>{%16} | |
} => !stream.timepoint | |
%17 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%16} | |
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<?x?xf32>{%0, %6} in !stream.resource<external>{%16} -> !hal.buffer_view | |
util.return %18 : !hal.buffer_view | |
} | |
} | |
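// Note on the schedule above: the two encoding dispatches (@_encoding_0 and
// @_encoding_1) pack the row-major inputs into the tiled CPU layouts #encoding
// and #encoding1 and have no data dependence on each other, so they sit inside a
// single stream.async.concurrent region; only the matmul dispatch consumes both.
// A condensed sketch of that structure (value names %packed, %a, %b are
// illustrative; sizes and workloads elided):
//
//   %packed:2 = stream.async.concurrent with(%4 as ..., %9 as ...) {
//     %a = stream.async.dispatch @_encoding_0::@_encoding_0_encode_DxDxf32_to_DxDxf32(...)
//     %b = stream.async.dispatch @_encoding_1::@_encoding_1_encode_DxDxf32_to_DxDxf32(...)
//     stream.yield %a, %b
//   }
//   %result = stream.async.dispatch @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32(%packed#0, %packed#1, ...)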
// -----// IR Dump After PropagateTimepointsPass (iree-stream-propagate-timepoints) //----- // | |
#encoding = #iree_encoding.layout<[#iree_cpu.cpu_encoding_layout<configuration = {encoding_info = {innerDimsPos = [0, 1], innerTileSizes = [8, 1], outerDimsPerm = [0, 1]}}>]> | |
#encoding1 = #iree_encoding.layout<[#iree_cpu.cpu_encoding_layout<configuration = {encoding_info = {innerDimsPos = [1, 0], innerTileSizes = [4, 1], outerDimsPerm = [1, 0]}}>]> | |
#executable_target_embedded_elf_x86_64 = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>, max_stack_allocation_size = 32768 : i64, native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#map3 = affine_map<(d0, d1) -> (d0, d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64]> : !hal.device | |
#encoding2 = #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding3 = #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding4 = #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
stream.executable private @foo_dispatch_0 { | |
stream.executable.export public @foo_dispatch_0_matmul_DxDxD_f32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @foo_dispatch_0_matmul_DxDxD_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: index, %arg5: index, %arg6: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg5, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} | |
%5 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = stream.binding.subspan %arg6[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
%7 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} -> tensor<?x?xf32, #encoding2> | |
%8 = iree_tensor_ext.dispatch.tensor.load %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} -> tensor<?x?xf32, #encoding3> | |
%9 = tensor.empty(%0, %3) : tensor<?x?xf32> | |
%10 = tensor.empty(%0, %3) : tensor<?x?xf32, #encoding4> | |
%11 = linalg.fill ins(%cst : f32) outs(%10 : tensor<?x?xf32, #encoding4>) -> tensor<?x?xf32, #encoding4> | |
%12 = linalg.matmul ins(%7, %8 : tensor<?x?xf32, #encoding2>, tensor<?x?xf32, #encoding3>) outs(%11 : tensor<?x?xf32, #encoding4>) -> tensor<?x?xf32, #encoding4> | |
%13 = iree_encoding.unset_encoding %12 : tensor<?x?xf32, #encoding4> -> tensor<?x?xf32>{%0, %3} | |
%14 = linalg.generic {indexing_maps = [#map3, #map3], iterator_types = ["parallel", "parallel"]} ins(%13 : tensor<?x?xf32>) outs(%9 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%15 = arith.addf %in, %in : f32 | |
linalg.yield %15 : f32 | |
} -> tensor<?x?xf32> | |
iree_tensor_ext.dispatch.tensor.store %14, %6, offsets = [0, 0], sizes = [%0, %3], strides = [1, 1] : tensor<?x?xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
return | |
} | |
} | |
} | |
stream.executable private @_encoding_0 { | |
stream.executable.export public @_encoding_0_encode_DxDxf32_to_DxDxf32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @_encoding_0_encode_DxDxf32_to_DxDxf32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} | |
%5 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding>>{%2, %3} | |
%6 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} -> tensor<?x?xf32> | |
%7 = iree_encoding.set_encoding %6 : tensor<?x?xf32> -> tensor<?x?xf32, #encoding> | |
iree_tensor_ext.dispatch.tensor.store %7, %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : tensor<?x?xf32, #encoding> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding>>{%2, %3} | |
return | |
} | |
} | |
} | |
stream.executable private @_encoding_1 { | |
stream.executable.export public @_encoding_1_encode_DxDxf32_to_DxDxf32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @_encoding_1_encode_DxDxf32_to_DxDxf32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} | |
%5 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} -> tensor<?x?xf32> | |
%7 = iree_encoding.set_encoding %6 : tensor<?x?xf32> -> tensor<?x?xf32, #encoding1> | |
iree_tensor_ext.dispatch.tensor.store %7, %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : tensor<?x?xf32, #encoding1> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
return | |
} | |
} | |
} | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%c32 = arith.constant 32 : index | |
%c0 = arith.constant 0 : index | |
%c8 = arith.constant 8 : index | |
%c4 = arith.constant 4 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c4 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%5, %6]) type(%element_type_f32) encoding(%dense_row_major) | |
%7 = arith.muli %5, %c4 : index | |
%8 = arith.muli %7, %6 : index | |
%9 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%5, %6} in !stream.resource<external>{%8} | |
%10 = arith.ceildivsi %0, %c8 : index | |
%11 = arith.muli %10, %c32 : index | |
%12 = arith.muli %11, %1 : index | |
%13 = arith.ceildivsi %6, %c4 : index | |
%14 = arith.muli %13, %c4 : index | |
%15 = arith.muli %7, %14 : index | |
%16 = arith.muli %2, %6 : index | |
%17 = stream.timepoint.immediate => !stream.timepoint | |
%18 = stream.timepoint.immediate => !stream.timepoint | |
%19 = stream.timepoint.join max(%17, %18) => !stream.timepoint | |
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) await(%19) => with(%4 as %arg2: !stream.resource<external>{%3}, %9 as %arg3: !stream.resource<external>{%8}) -> !stream.resource<external>{%16} { | |
%22:2 = stream.async.concurrent with(%arg2 as %arg4: !stream.resource<external>{%3}, %arg3 as %arg5: !stream.resource<external>{%8}) -> (!stream.resource<transient>{%12}, !stream.resource<transient>{%15}) { | |
%24 = stream.async.dispatch @_encoding_0::@_encoding_0_encode_DxDxf32_to_DxDxf32[%0, %1, %0, %1](%arg4[%c0 to %3 for %3], %0, %1, %0, %1) : (!stream.resource<external>{%3}, index, index, index, index) -> !stream.resource<transient>{%12} | |
%25 = stream.async.dispatch @_encoding_1::@_encoding_1_encode_DxDxf32_to_DxDxf32[%5, %6, %5, %6](%arg5[%c0 to %8 for %8], %5, %6, %5, %6) : (!stream.resource<external>{%8}, index, index, index, index) -> !stream.resource<transient>{%15} | |
stream.yield %24, %25 : !stream.resource<transient>{%12}, !stream.resource<transient>{%15} | |
} | |
%23 = stream.async.dispatch @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %5, %6](%22#0[%c0 to %12 for %12], %22#1[%c0 to %15 for %15], %0, %1, %5, %6) : (!stream.resource<transient>{%12}, !stream.resource<transient>{%15}, index, index, index, index) -> !stream.resource<external>{%16} | |
stream.yield %23 : !stream.resource<external>{%16} | |
} => !stream.timepoint | |
%20 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%16} | |
%21 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %20 : tensor<?x?xf32>{%0, %6} in !stream.resource<external>{%16} -> !hal.buffer_view | |
util.return %21 : !hal.buffer_view | |
} | |
} | |
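// What this pass changed: iree-stream-propagate-timepoints threads explicit
// !stream.timepoint values through the program, presumably so later passes can
// reason about execution ordering. Both imported resources are immediately
// available, so the pass inserts immediate timepoints and joins them ahead of
// the execute region (copied from the @foo body above, trailing region elided):
//
//   %17 = stream.timepoint.immediate => !stream.timepoint
//   %18 = stream.timepoint.immediate => !stream.timepoint
//   %19 = stream.timepoint.join max(%17, %18) => !stream.timepoint
//   %results, %result_timepoint = stream.async.execute ... await(%19) => with(...) { ... }
//
// Awaiting an immediate timepoint is a no-op, which the canonicalizer run below folds away.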
// -----// IR Dump After MaterializeBuiltinsPass (iree-stream-materialize-builtins) //----- // | |
#encoding = #iree_encoding.layout<[#iree_cpu.cpu_encoding_layout<configuration = {encoding_info = {innerDimsPos = [0, 1], innerTileSizes = [8, 1], outerDimsPerm = [0, 1]}}>]> | |
#encoding1 = #iree_encoding.layout<[#iree_cpu.cpu_encoding_layout<configuration = {encoding_info = {innerDimsPos = [1, 0], innerTileSizes = [4, 1], outerDimsPerm = [1, 0]}}>]> | |
#executable_target_embedded_elf_x86_64 = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>, max_stack_allocation_size = 32768 : i64, native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#map3 = affine_map<(d0, d1) -> (d0, d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64]> : !hal.device | |
#encoding2 = #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding3 = #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding4 = #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
stream.executable private @foo_dispatch_0 { | |
stream.executable.export public @foo_dispatch_0_matmul_DxDxD_f32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @foo_dispatch_0_matmul_DxDxD_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: index, %arg5: index, %arg6: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg5, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} | |
%5 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = stream.binding.subspan %arg6[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
%7 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} -> tensor<?x?xf32, #encoding2> | |
%8 = iree_tensor_ext.dispatch.tensor.load %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} -> tensor<?x?xf32, #encoding3> | |
%9 = tensor.empty(%0, %3) : tensor<?x?xf32> | |
%10 = tensor.empty(%0, %3) : tensor<?x?xf32, #encoding4> | |
%11 = linalg.fill ins(%cst : f32) outs(%10 : tensor<?x?xf32, #encoding4>) -> tensor<?x?xf32, #encoding4> | |
%12 = linalg.matmul ins(%7, %8 : tensor<?x?xf32, #encoding2>, tensor<?x?xf32, #encoding3>) outs(%11 : tensor<?x?xf32, #encoding4>) -> tensor<?x?xf32, #encoding4> | |
%13 = iree_encoding.unset_encoding %12 : tensor<?x?xf32, #encoding4> -> tensor<?x?xf32>{%0, %3} | |
%14 = linalg.generic {indexing_maps = [#map3, #map3], iterator_types = ["parallel", "parallel"]} ins(%13 : tensor<?x?xf32>) outs(%9 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%15 = arith.addf %in, %in : f32 | |
linalg.yield %15 : f32 | |
} -> tensor<?x?xf32> | |
iree_tensor_ext.dispatch.tensor.store %14, %6, offsets = [0, 0], sizes = [%0, %3], strides = [1, 1] : tensor<?x?xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
return | |
} | |
} | |
} | |
stream.executable private @_encoding_0 { | |
stream.executable.export public @_encoding_0_encode_DxDxf32_to_DxDxf32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @_encoding_0_encode_DxDxf32_to_DxDxf32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} | |
%5 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding>>{%2, %3} | |
%6 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} -> tensor<?x?xf32> | |
%7 = iree_encoding.set_encoding %6 : tensor<?x?xf32> -> tensor<?x?xf32, #encoding> | |
iree_tensor_ext.dispatch.tensor.store %7, %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : tensor<?x?xf32, #encoding> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding>>{%2, %3} | |
return | |
} | |
} | |
} | |
stream.executable private @_encoding_1 { | |
stream.executable.export public @_encoding_1_encode_DxDxf32_to_DxDxf32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @_encoding_1_encode_DxDxf32_to_DxDxf32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} | |
%5 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} -> tensor<?x?xf32> | |
%7 = iree_encoding.set_encoding %6 : tensor<?x?xf32> -> tensor<?x?xf32, #encoding1> | |
iree_tensor_ext.dispatch.tensor.store %7, %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : tensor<?x?xf32, #encoding1> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
return | |
} | |
} | |
} | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%c32 = arith.constant 32 : index | |
%c0 = arith.constant 0 : index | |
%c8 = arith.constant 8 : index | |
%c4 = arith.constant 4 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c4 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%5, %6]) type(%element_type_f32) encoding(%dense_row_major) | |
%7 = arith.muli %5, %c4 : index | |
%8 = arith.muli %7, %6 : index | |
%9 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%5, %6} in !stream.resource<external>{%8} | |
%10 = arith.ceildivsi %0, %c8 : index | |
%11 = arith.muli %10, %c32 : index | |
%12 = arith.muli %11, %1 : index | |
%13 = arith.ceildivsi %6, %c4 : index | |
%14 = arith.muli %13, %c4 : index | |
%15 = arith.muli %7, %14 : index | |
%16 = arith.muli %2, %6 : index | |
%17 = stream.timepoint.immediate => !stream.timepoint | |
%18 = stream.timepoint.immediate => !stream.timepoint | |
%19 = stream.timepoint.join max(%17, %18) => !stream.timepoint | |
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) await(%19) => with(%4 as %arg2: !stream.resource<external>{%3}, %9 as %arg3: !stream.resource<external>{%8}) -> !stream.resource<external>{%16} { | |
%22:2 = stream.async.concurrent with(%arg2 as %arg4: !stream.resource<external>{%3}, %arg3 as %arg5: !stream.resource<external>{%8}) -> (!stream.resource<transient>{%12}, !stream.resource<transient>{%15}) { | |
%24 = stream.async.dispatch @_encoding_0::@_encoding_0_encode_DxDxf32_to_DxDxf32[%0, %1, %0, %1](%arg4[%c0 to %3 for %3], %0, %1, %0, %1) : (!stream.resource<external>{%3}, index, index, index, index) -> !stream.resource<transient>{%12} | |
%25 = stream.async.dispatch @_encoding_1::@_encoding_1_encode_DxDxf32_to_DxDxf32[%5, %6, %5, %6](%arg5[%c0 to %8 for %8], %5, %6, %5, %6) : (!stream.resource<external>{%8}, index, index, index, index) -> !stream.resource<transient>{%15} | |
stream.yield %24, %25 : !stream.resource<transient>{%12}, !stream.resource<transient>{%15} | |
} | |
%23 = stream.async.dispatch @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %5, %6](%22#0[%c0 to %12 for %12], %22#1[%c0 to %15 for %15], %0, %1, %5, %6) : (!stream.resource<transient>{%12}, !stream.resource<transient>{%15}, index, index, index, index) -> !stream.resource<external>{%16} | |
stream.yield %23 : !stream.resource<external>{%16} | |
} => !stream.timepoint | |
%20 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%16} | |
%21 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %20 : tensor<?x?xf32>{%0, %6} in !stream.resource<external>{%16} -> !hal.buffer_view | |
util.return %21 : !hal.buffer_view | |
} | |
} | |
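// This dump is textually identical to the previous one: no stream builtin
// executables were required for this program (the pass presumably only emits
// helper dispatches for operations a target cannot express directly, e.g.
// emulated fills/splats), so iree-stream-materialize-builtins is a no-op here.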
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%c32 = arith.constant 32 : index | |
%c0 = arith.constant 0 : index | |
%c8 = arith.constant 8 : index | |
%c4 = arith.constant 4 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c4 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%5, %6]) type(%element_type_f32) encoding(%dense_row_major) | |
%7 = arith.muli %5, %c4 : index | |
%8 = arith.muli %7, %6 : index | |
%9 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%5, %6} in !stream.resource<external>{%8} | |
%10 = arith.ceildivsi %0, %c8 : index | |
%11 = arith.muli %10, %c32 : index | |
%12 = arith.muli %11, %1 : index | |
%13 = arith.ceildivsi %6, %c4 : index | |
%14 = arith.muli %13, %c4 : index | |
%15 = arith.muli %7, %14 : index | |
%16 = arith.muli %2, %6 : index | |
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%4 as %arg2: !stream.resource<external>{%3}, %9 as %arg3: !stream.resource<external>{%8}) -> !stream.resource<external>{%16} { | |
%19:2 = stream.async.concurrent with(%arg2 as %arg4: !stream.resource<external>{%3}, %arg3 as %arg5: !stream.resource<external>{%8}) -> (!stream.resource<transient>{%12}, !stream.resource<transient>{%15}) { | |
%21 = stream.async.dispatch @_encoding_0::@_encoding_0_encode_DxDxf32_to_DxDxf32[%0, %1, %0, %1](%arg4[%c0 to %3 for %3], %0, %1, %0, %1) : (!stream.resource<external>{%3}, index, index, index, index) -> !stream.resource<transient>{%12} | |
%22 = stream.async.dispatch @_encoding_1::@_encoding_1_encode_DxDxf32_to_DxDxf32[%5, %6, %5, %6](%arg5[%c0 to %8 for %8], %5, %6, %5, %6) : (!stream.resource<external>{%8}, index, index, index, index) -> !stream.resource<transient>{%15} | |
stream.yield %21, %22 : !stream.resource<transient>{%12}, !stream.resource<transient>{%15} | |
} | |
%20 = stream.async.dispatch @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %5, %6](%19#0[%c0 to %12 for %12], %19#1[%c0 to %15 for %15], %0, %1, %5, %6) : (!stream.resource<transient>{%12}, !stream.resource<transient>{%15}, index, index, index, index) -> !stream.resource<external>{%16} | |
stream.yield %20 : !stream.resource<external>{%16} | |
} => !stream.timepoint | |
%17 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%16} | |
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<?x?xf32>{%0, %6} in !stream.resource<external>{%16} -> !hal.buffer_view | |
util.return %18 : !hal.buffer_view | |
} | |
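// Canonicalization removed the timepoint plumbing introduced above: awaiting a
// join of immediate timepoints is a no-op, so the stream.timepoint.immediate and
// stream.timepoint.join ops are gone and the execute region no longer carries an
// await clause. Condensed before/after of its header:
//
//   before: stream.async.execute on(#hal.device.affinity<@__device_0>) await(%19) => with(...)
//   after:  stream.async.execute on(#hal.device.affinity<@__device_0>) with(...)
//
// This and the following cleanup passes run at function scope, so only @foo is
// printed; the executables are unchanged.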
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%c32 = arith.constant 32 : index | |
%c0 = arith.constant 0 : index | |
%c8 = arith.constant 8 : index | |
%c4 = arith.constant 4 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c4 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%5, %6]) type(%element_type_f32) encoding(%dense_row_major) | |
%7 = arith.muli %5, %c4 : index | |
%8 = arith.muli %7, %6 : index | |
%9 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%5, %6} in !stream.resource<external>{%8} | |
%10 = arith.ceildivsi %0, %c8 : index | |
%11 = arith.muli %10, %c32 : index | |
%12 = arith.muli %11, %1 : index | |
%13 = arith.ceildivsi %6, %c4 : index | |
%14 = arith.muli %13, %c4 : index | |
%15 = arith.muli %7, %14 : index | |
%16 = arith.muli %2, %6 : index | |
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%4 as %arg2: !stream.resource<external>{%3}, %9 as %arg3: !stream.resource<external>{%8}) -> !stream.resource<external>{%16} { | |
%19:2 = stream.async.concurrent with(%arg2 as %arg4: !stream.resource<external>{%3}, %arg3 as %arg5: !stream.resource<external>{%8}) -> (!stream.resource<transient>{%12}, !stream.resource<transient>{%15}) { | |
%21 = stream.async.dispatch @_encoding_0::@_encoding_0_encode_DxDxf32_to_DxDxf32[%0, %1, %0, %1](%arg4[%c0 to %3 for %3], %0, %1, %0, %1) : (!stream.resource<external>{%3}, index, index, index, index) -> !stream.resource<transient>{%12} | |
%22 = stream.async.dispatch @_encoding_1::@_encoding_1_encode_DxDxf32_to_DxDxf32[%5, %6, %5, %6](%arg5[%c0 to %8 for %8], %5, %6, %5, %6) : (!stream.resource<external>{%8}, index, index, index, index) -> !stream.resource<transient>{%15} | |
stream.yield %21, %22 : !stream.resource<transient>{%12}, !stream.resource<transient>{%15} | |
} | |
%20 = stream.async.dispatch @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %5, %6](%19#0[%c0 to %12 for %12], %19#1[%c0 to %15 for %15], %0, %1, %5, %6) : (!stream.resource<transient>{%12}, !stream.resource<transient>{%15}, index, index, index, index) -> !stream.resource<external>{%16} | |
stream.yield %20 : !stream.resource<external>{%16} | |
} => !stream.timepoint | |
%17 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%16} | |
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<?x?xf32>{%0, %6} in !stream.resource<external>{%16} -> !hal.buffer_view | |
util.return %18 : !hal.buffer_view | |
} | |
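// CSE finds nothing further to eliminate in @foo: each arith.muli / arith.ceildivsi
// result feeds a distinct size computation, and the duplicated
// stream.timepoint.immediate ops it could otherwise have merged were already
// folded away by the canonicalizer above, so the dump is unchanged.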
// -----// IR Dump After OptimizeIntArithmeticPass (iree-util-optimize-int-arithmetic) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%c32 = arith.constant 32 : index | |
%c0 = arith.constant 0 : index | |
%c8 = arith.constant 8 : index | |
%c4 = arith.constant 4 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c4 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%5, %6]) type(%element_type_f32) encoding(%dense_row_major) | |
%7 = arith.muli %5, %c4 : index | |
%8 = arith.muli %7, %6 : index | |
%9 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%5, %6} in !stream.resource<external>{%8} | |
%10 = arith.ceildivsi %0, %c8 : index | |
%11 = arith.muli %10, %c32 : index | |
%12 = arith.muli %11, %1 : index | |
%13 = arith.ceildivsi %6, %c4 : index | |
%14 = arith.muli %13, %c4 : index | |
%15 = arith.muli %7, %14 : index | |
%16 = arith.muli %2, %6 : index | |
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%4 as %arg2: !stream.resource<external>{%3}, %9 as %arg3: !stream.resource<external>{%8}) -> !stream.resource<external>{%16} { | |
%19:2 = stream.async.concurrent with(%arg2 as %arg4: !stream.resource<external>{%3}, %arg3 as %arg5: !stream.resource<external>{%8}) -> (!stream.resource<transient>{%12}, !stream.resource<transient>{%15}) { | |
%21 = stream.async.dispatch @_encoding_0::@_encoding_0_encode_DxDxf32_to_DxDxf32[%0, %1, %0, %1](%arg4[%c0 to %3 for %3], %0, %1, %0, %1) : (!stream.resource<external>{%3}, index, index, index, index) -> !stream.resource<transient>{%12} | |
%22 = stream.async.dispatch @_encoding_1::@_encoding_1_encode_DxDxf32_to_DxDxf32[%5, %6, %5, %6](%arg5[%c0 to %8 for %8], %5, %6, %5, %6) : (!stream.resource<external>{%8}, index, index, index, index) -> !stream.resource<transient>{%15} | |
stream.yield %21, %22 : !stream.resource<transient>{%12}, !stream.resource<transient>{%15} | |
} | |
%20 = stream.async.dispatch @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %5, %6](%19#0[%c0 to %12 for %12], %19#1[%c0 to %15 for %15], %0, %1, %5, %6) : (!stream.resource<transient>{%12}, !stream.resource<transient>{%15}, index, index, index, index) -> !stream.resource<external>{%16} | |
stream.yield %20 : !stream.resource<external>{%16} | |
} => !stream.timepoint | |
%17 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%16} | |
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<?x?xf32>{%0, %6} in !stream.resource<external>{%16} -> !hal.buffer_view | |
util.return %18 : !hal.buffer_view | |
} | |
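// The integer arithmetic this pass inspects is the byte-size math for the
// stream resources; reading it off the @foo body above (f32 = 4 bytes,
// ceildiv = arith.ceildivsi):
//
//   %3  = 4 * %0 * %1                    // input0, row-major
//   %8  = 4 * %5 * %6                    // input1, row-major
//   %12 = 4 * (ceildiv(%0, 8) * 8) * %1  // input0 packed with 8x1 inner tiles (#encoding)
//   %15 = 4 * %5 * (ceildiv(%6, 4) * 4)  // input1 packed with 4x1 inner tiles (#encoding1)
//   %16 = 4 * %0 * %6                    // matmul result, row-major
//
// With every dimension dynamic there is nothing to fold or narrow, so the
// function is left unchanged.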
// -----// IR Dump After SimplifyGlobalAccessesPass (iree-util-simplify-global-accesses) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%c32 = arith.constant 32 : index | |
%c0 = arith.constant 0 : index | |
%c8 = arith.constant 8 : index | |
%c4 = arith.constant 4 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c4 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%5, %6]) type(%element_type_f32) encoding(%dense_row_major) | |
%7 = arith.muli %5, %c4 : index | |
%8 = arith.muli %7, %6 : index | |
%9 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%5, %6} in !stream.resource<external>{%8} | |
%10 = arith.ceildivsi %0, %c8 : index | |
%11 = arith.muli %10, %c32 : index | |
%12 = arith.muli %11, %1 : index | |
%13 = arith.ceildivsi %6, %c4 : index | |
%14 = arith.muli %13, %c4 : index | |
%15 = arith.muli %7, %14 : index | |
%16 = arith.muli %2, %6 : index | |
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%4 as %arg2: !stream.resource<external>{%3}, %9 as %arg3: !stream.resource<external>{%8}) -> !stream.resource<external>{%16} { | |
%19:2 = stream.async.concurrent with(%arg2 as %arg4: !stream.resource<external>{%3}, %arg3 as %arg5: !stream.resource<external>{%8}) -> (!stream.resource<transient>{%12}, !stream.resource<transient>{%15}) { | |
%21 = stream.async.dispatch @_encoding_0::@_encoding_0_encode_DxDxf32_to_DxDxf32[%0, %1, %0, %1](%arg4[%c0 to %3 for %3], %0, %1, %0, %1) : (!stream.resource<external>{%3}, index, index, index, index) -> !stream.resource<transient>{%12} | |
%22 = stream.async.dispatch @_encoding_1::@_encoding_1_encode_DxDxf32_to_DxDxf32[%5, %6, %5, %6](%arg5[%c0 to %8 for %8], %5, %6, %5, %6) : (!stream.resource<external>{%8}, index, index, index, index) -> !stream.resource<transient>{%15} | |
stream.yield %21, %22 : !stream.resource<transient>{%12}, !stream.resource<transient>{%15} | |
} | |
%20 = stream.async.dispatch @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %5, %6](%19#0[%c0 to %12 for %12], %19#1[%c0 to %15 for %15], %0, %1, %5, %6) : (!stream.resource<transient>{%12}, !stream.resource<transient>{%15}, index, index, index, index) -> !stream.resource<external>{%16} | |
stream.yield %20 : !stream.resource<external>{%16} | |
} => !stream.timepoint | |
%17 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%16} | |
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<?x?xf32>{%0, %6} in !stream.resource<external>{%16} -> !hal.buffer_view | |
util.return %18 : !hal.buffer_view | |
} | |
// -----// IR Dump After ApplyPatternsPass (iree-util-apply-patterns) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%c32 = arith.constant 32 : index | |
%c0 = arith.constant 0 : index | |
%c8 = arith.constant 8 : index | |
%c4 = arith.constant 4 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c4 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%5, %6]) type(%element_type_f32) encoding(%dense_row_major) | |
%7 = arith.muli %5, %c4 : index | |
%8 = arith.muli %7, %6 : index | |
%9 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%5, %6} in !stream.resource<external>{%8} | |
%10 = arith.ceildivsi %0, %c8 : index | |
%11 = arith.muli %10, %c32 : index | |
%12 = arith.muli %11, %1 : index | |
%13 = arith.ceildivsi %6, %c4 : index | |
%14 = arith.muli %13, %c4 : index | |
%15 = arith.muli %7, %14 : index | |
%16 = arith.muli %2, %6 : index | |
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%4 as %arg2: !stream.resource<external>{%3}, %9 as %arg3: !stream.resource<external>{%8}) -> !stream.resource<external>{%16} { | |
%19:2 = stream.async.concurrent with(%arg2 as %arg4: !stream.resource<external>{%3}, %arg3 as %arg5: !stream.resource<external>{%8}) -> (!stream.resource<transient>{%12}, !stream.resource<transient>{%15}) { | |
%21 = stream.async.dispatch @_encoding_0::@_encoding_0_encode_DxDxf32_to_DxDxf32[%0, %1, %0, %1](%arg4[%c0 to %3 for %3], %0, %1, %0, %1) : (!stream.resource<external>{%3}, index, index, index, index) -> !stream.resource<transient>{%12} | |
%22 = stream.async.dispatch @_encoding_1::@_encoding_1_encode_DxDxf32_to_DxDxf32[%5, %6, %5, %6](%arg5[%c0 to %8 for %8], %5, %6, %5, %6) : (!stream.resource<external>{%8}, index, index, index, index) -> !stream.resource<transient>{%15} | |
stream.yield %21, %22 : !stream.resource<transient>{%12}, !stream.resource<transient>{%15} | |
} | |
%20 = stream.async.dispatch @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %5, %6](%19#0[%c0 to %12 for %12], %19#1[%c0 to %15 for %15], %0, %1, %5, %6) : (!stream.resource<transient>{%12}, !stream.resource<transient>{%15}, index, index, index, index) -> !stream.resource<external>{%16} | |
stream.yield %20 : !stream.resource<external>{%16} | |
} => !stream.timepoint | |
%17 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%16} | |
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<?x?xf32>{%0, %6} in !stream.resource<external>{%16} -> !hal.buffer_view | |
util.return %18 : !hal.buffer_view | |
} | |
// -----// IR Dump After FoldGlobalsPass (iree-util-fold-globals) //----- // | |
#encoding = #iree_encoding.layout<[#iree_cpu.cpu_encoding_layout<configuration = {encoding_info = {innerDimsPos = [0, 1], innerTileSizes = [8, 1], outerDimsPerm = [0, 1]}}>]> | |
#encoding1 = #iree_encoding.layout<[#iree_cpu.cpu_encoding_layout<configuration = {encoding_info = {innerDimsPos = [1, 0], innerTileSizes = [4, 1], outerDimsPerm = [1, 0]}}>]> | |
#executable_target_embedded_elf_x86_64 = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>, max_stack_allocation_size = 32768 : i64, native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#map3 = affine_map<(d0, d1) -> (d0, d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64]> : !hal.device | |
#encoding2 = #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding3 = #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding4 = #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
stream.executable private @foo_dispatch_0 { | |
stream.executable.export public @foo_dispatch_0_matmul_DxDxD_f32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @foo_dispatch_0_matmul_DxDxD_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: index, %arg5: index, %arg6: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg5, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} | |
%5 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = stream.binding.subspan %arg6[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
%7 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} -> tensor<?x?xf32, #encoding2> | |
%8 = iree_tensor_ext.dispatch.tensor.load %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} -> tensor<?x?xf32, #encoding3> | |
%9 = tensor.empty(%0, %3) : tensor<?x?xf32> | |
%10 = tensor.empty(%0, %3) : tensor<?x?xf32, #encoding4> | |
%11 = linalg.fill ins(%cst : f32) outs(%10 : tensor<?x?xf32, #encoding4>) -> tensor<?x?xf32, #encoding4> | |
%12 = linalg.matmul ins(%7, %8 : tensor<?x?xf32, #encoding2>, tensor<?x?xf32, #encoding3>) outs(%11 : tensor<?x?xf32, #encoding4>) -> tensor<?x?xf32, #encoding4> | |
%13 = iree_encoding.unset_encoding %12 : tensor<?x?xf32, #encoding4> -> tensor<?x?xf32>{%0, %3} | |
%14 = linalg.generic {indexing_maps = [#map3, #map3], iterator_types = ["parallel", "parallel"]} ins(%13 : tensor<?x?xf32>) outs(%9 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%15 = arith.addf %in, %in : f32 | |
linalg.yield %15 : f32 | |
} -> tensor<?x?xf32> | |
iree_tensor_ext.dispatch.tensor.store %14, %6, offsets = [0, 0], sizes = [%0, %3], strides = [1, 1] : tensor<?x?xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
return | |
} | |
} | |
} | |
stream.executable private @_encoding_0 { | |
stream.executable.export public @_encoding_0_encode_DxDxf32_to_DxDxf32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @_encoding_0_encode_DxDxf32_to_DxDxf32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} | |
%5 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding>>{%2, %3} | |
%6 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} -> tensor<?x?xf32> | |
%7 = iree_encoding.set_encoding %6 : tensor<?x?xf32> -> tensor<?x?xf32, #encoding> | |
iree_tensor_ext.dispatch.tensor.store %7, %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : tensor<?x?xf32, #encoding> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding>>{%2, %3} | |
return | |
} | |
} | |
} | |
stream.executable private @_encoding_1 { | |
stream.executable.export public @_encoding_1_encode_DxDxf32_to_DxDxf32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @_encoding_1_encode_DxDxf32_to_DxDxf32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} | |
%5 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} -> tensor<?x?xf32> | |
%7 = iree_encoding.set_encoding %6 : tensor<?x?xf32> -> tensor<?x?xf32, #encoding1> | |
iree_tensor_ext.dispatch.tensor.store %7, %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : tensor<?x?xf32, #encoding1> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
return | |
} | |
} | |
} | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%c32 = arith.constant 32 : index | |
%c0 = arith.constant 0 : index | |
%c8 = arith.constant 8 : index | |
%c4 = arith.constant 4 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c4 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%5, %6]) type(%element_type_f32) encoding(%dense_row_major) | |
%7 = arith.muli %5, %c4 : index | |
%8 = arith.muli %7, %6 : index | |
%9 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%5, %6} in !stream.resource<external>{%8} | |
%10 = arith.ceildivsi %0, %c8 : index | |
%11 = arith.muli %10, %c32 : index | |
%12 = arith.muli %11, %1 : index | |
%13 = arith.ceildivsi %6, %c4 : index | |
%14 = arith.muli %13, %c4 : index | |
%15 = arith.muli %7, %14 : index | |
%16 = arith.muli %2, %6 : index | |
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%4 as %arg2: !stream.resource<external>{%3}, %9 as %arg3: !stream.resource<external>{%8}) -> !stream.resource<external>{%16} { | |
%19:2 = stream.async.concurrent with(%arg2 as %arg4: !stream.resource<external>{%3}, %arg3 as %arg5: !stream.resource<external>{%8}) -> (!stream.resource<transient>{%12}, !stream.resource<transient>{%15}) { | |
%21 = stream.async.dispatch @_encoding_0::@_encoding_0_encode_DxDxf32_to_DxDxf32[%0, %1, %0, %1](%arg4[%c0 to %3 for %3], %0, %1, %0, %1) : (!stream.resource<external>{%3}, index, index, index, index) -> !stream.resource<transient>{%12} | |
%22 = stream.async.dispatch @_encoding_1::@_encoding_1_encode_DxDxf32_to_DxDxf32[%5, %6, %5, %6](%arg5[%c0 to %8 for %8], %5, %6, %5, %6) : (!stream.resource<external>{%8}, index, index, index, index) -> !stream.resource<transient>{%15} | |
stream.yield %21, %22 : !stream.resource<transient>{%12}, !stream.resource<transient>{%15} | |
} | |
%20 = stream.async.dispatch @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %5, %6](%19#0[%c0 to %12 for %12], %19#1[%c0 to %15 for %15], %0, %1, %5, %6) : (!stream.resource<transient>{%12}, !stream.resource<transient>{%15}, index, index, index, index) -> !stream.resource<external>{%16} | |
stream.yield %20 : !stream.resource<external>{%16} | |
} => !stream.timepoint | |
%17 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%16} | |
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<?x?xf32>{%0, %6} in !stream.resource<external>{%16} -> !hal.buffer_view | |
util.return %18 : !hal.buffer_view | |
} | |
} | |
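// The only global in this module is the device handle
//
//   util.global private @__device_0 = #device_target_local
//
// so there is nothing for iree-util-fold-globals to fold and no duplicate
// globals for the iree-util-fuse-globals run below to merge; the module is
// unchanged through the fold pass, and the fuse-globals dump that follows
// starts out identical.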
// -----// IR Dump After FuseGlobalsPass (iree-util-fuse-globals) //----- // | |
#encoding = #iree_encoding.layout<[#iree_cpu.cpu_encoding_layout<configuration = {encoding_info = {innerDimsPos = [0, 1], innerTileSizes = [8, 1], outerDimsPerm = [0, 1]}}>]> | |
#encoding1 = #iree_encoding.layout<[#iree_cpu.cpu_encoding_layout<configuration = {encoding_info = {innerDimsPos = [1, 0], innerTileSizes = [4, 1], outerDimsPerm = [1, 0]}}>]> | |
#executable_target_embedded_elf_x86_64 = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>, max_stack_allocation_size = 32768 : i64, native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#map3 = affine_map<(d0, d1) -> (d0, d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64]> : !hal.device | |
#encoding2 = #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding3 = #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding4 = #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
stream.executable private @foo_dispatch_0 { | |
stream.executable.export public @foo_dispatch_0_matmul_DxDxD_f32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @foo_dispatch_0_matmul_DxDxD_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: index, %arg5: index, %arg6: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg5, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} | |
%5 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = stream.binding.subspan %arg6[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
%7 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} -> tensor<?x?xf32, #encoding2> | |
%8 = iree_tensor_ext.dispatch.tensor.load %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} -> tensor<?x?xf32, #encoding3> | |
%9 = tensor.empty(%0, %3) : tensor<?x?xf32> | |
%10 = tensor.empty(%0, %3) : tensor<?x?xf32, #encoding4> | |
%11 = linalg.fill ins(%cst : f32) outs(%10 : tensor<?x?xf32, #encoding4>) -> tensor<?x?xf32, #encoding4> | |
%12 = linalg.matmul ins(%7, %8 : tensor<?x?xf32, #encoding2>, tensor<?x?xf32, #encoding3>) outs(%11 : tensor<?x?xf32, #encoding4>) -> tensor<?x?xf32, #encoding4> | |
%13 = iree_encoding.unset_encoding %12 : tensor<?x?xf32, #encoding4> -> tensor<?x?xf32>{%0, %3} | |
%14 = linalg.generic {indexing_maps = [#map3, #map3], iterator_types = ["parallel", "parallel"]} ins(%13 : tensor<?x?xf32>) outs(%9 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%15 = arith.addf %in, %in : f32 | |
linalg.yield %15 : f32 | |
} -> tensor<?x?xf32> | |
iree_tensor_ext.dispatch.tensor.store %14, %6, offsets = [0, 0], sizes = [%0, %3], strides = [1, 1] : tensor<?x?xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
return | |
} | |
} | |
} | |
stream.executable private @_encoding_0 { | |
stream.executable.export public @_encoding_0_encode_DxDxf32_to_DxDxf32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @_encoding_0_encode_DxDxf32_to_DxDxf32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} | |
%5 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding>>{%2, %3} | |
%6 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} -> tensor<?x?xf32> | |
%7 = iree_encoding.set_encoding %6 : tensor<?x?xf32> -> tensor<?x?xf32, #encoding> | |
iree_tensor_ext.dispatch.tensor.store %7, %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : tensor<?x?xf32, #encoding> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding>>{%2, %3} | |
return | |
} | |
} | |
} | |
stream.executable private @_encoding_1 { | |
stream.executable.export public @_encoding_1_encode_DxDxf32_to_DxDxf32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @_encoding_1_encode_DxDxf32_to_DxDxf32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} | |
%5 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} -> tensor<?x?xf32> | |
%7 = iree_encoding.set_encoding %6 : tensor<?x?xf32> -> tensor<?x?xf32, #encoding1> | |
iree_tensor_ext.dispatch.tensor.store %7, %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : tensor<?x?xf32, #encoding1> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
return | |
} | |
} | |
} | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%c32 = arith.constant 32 : index | |
%c0 = arith.constant 0 : index | |
%c8 = arith.constant 8 : index | |
%c4 = arith.constant 4 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c4 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%5, %6]) type(%element_type_f32) encoding(%dense_row_major) | |
%7 = arith.muli %5, %c4 : index | |
%8 = arith.muli %7, %6 : index | |
%9 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%5, %6} in !stream.resource<external>{%8} | |
%10 = arith.ceildivsi %0, %c8 : index | |
%11 = arith.muli %10, %c32 : index | |
%12 = arith.muli %11, %1 : index | |
%13 = arith.ceildivsi %6, %c4 : index | |
%14 = arith.muli %13, %c4 : index | |
%15 = arith.muli %7, %14 : index | |
%16 = arith.muli %2, %6 : index | |
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%4 as %arg2: !stream.resource<external>{%3}, %9 as %arg3: !stream.resource<external>{%8}) -> !stream.resource<external>{%16} { | |
%19:2 = stream.async.concurrent with(%arg2 as %arg4: !stream.resource<external>{%3}, %arg3 as %arg5: !stream.resource<external>{%8}) -> (!stream.resource<transient>{%12}, !stream.resource<transient>{%15}) { | |
%21 = stream.async.dispatch @_encoding_0::@_encoding_0_encode_DxDxf32_to_DxDxf32[%0, %1, %0, %1](%arg4[%c0 to %3 for %3], %0, %1, %0, %1) : (!stream.resource<external>{%3}, index, index, index, index) -> !stream.resource<transient>{%12} | |
%22 = stream.async.dispatch @_encoding_1::@_encoding_1_encode_DxDxf32_to_DxDxf32[%5, %6, %5, %6](%arg5[%c0 to %8 for %8], %5, %6, %5, %6) : (!stream.resource<external>{%8}, index, index, index, index) -> !stream.resource<transient>{%15} | |
stream.yield %21, %22 : !stream.resource<transient>{%12}, !stream.resource<transient>{%15} | |
} | |
%20 = stream.async.dispatch @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %5, %6](%19#0[%c0 to %12 for %12], %19#1[%c0 to %15 for %15], %0, %1, %5, %6) : (!stream.resource<transient>{%12}, !stream.resource<transient>{%15}, index, index, index, index) -> !stream.resource<external>{%16} | |
stream.yield %20 : !stream.resource<external>{%16} | |
} => !stream.timepoint | |
%17 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%16} | |
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<?x?xf32>{%0, %6} in !stream.resource<external>{%16} -> !hal.buffer_view | |
util.return %18 : !hal.buffer_view | |
} | |
} | |
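// Note on the sizes computed in @foo above: %12 and %15 are the byte sizes of the two
// transient encoded operands and %16 is the byte size of the plain f32 result. With the
// resolved CPU encodings (innerTileSizes [8, 1] for the LHS, so dim 0 is padded to a
// multiple of 8, and [4, 1] with outerDimsPerm [1, 0] for the RHS, so dim 1 is padded to
// a multiple of 4), the index arithmetic reduces to the Python sketch below (a paraphrase
// of the IR for illustration, assuming 4-byte f32 elements):

import math

def lhs_encoded_bytes(m, k, tile=8, elem_bytes=4):
    # %12 = ceildivsi(%0, 8) * 32 * %1: m padded up to a multiple of 8, times k, times 4 bytes
    return math.ceil(m / tile) * tile * elem_bytes * k

def rhs_encoded_bytes(k, n, tile=4, elem_bytes=4):
    # %15 = (%5 * 4) * (ceildivsi(%6, 4) * 4): k rows, n padded up to a multiple of 4
    return k * elem_bytes * math.ceil(n / tile) * tile

def result_bytes(m, n, elem_bytes=4):
    # %16 = (%0 * 4) * %6: un-encoded m x n f32 output
    return m * elem_bytes * n

print(lhs_encoded_bytes(10, 3), rhs_encoded_bytes(3, 5), result_bytes(10, 5))
# 192 96 200  (m = 10 pads to 16 rows; n = 5 pads to 8 columns)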
// -----// IR Dump After IPOPass (iree-util-ipo) //----- // | |
#encoding = #iree_encoding.layout<[#iree_cpu.cpu_encoding_layout<configuration = {encoding_info = {innerDimsPos = [0, 1], innerTileSizes = [8, 1], outerDimsPerm = [0, 1]}}>]> | |
#encoding1 = #iree_encoding.layout<[#iree_cpu.cpu_encoding_layout<configuration = {encoding_info = {innerDimsPos = [1, 0], innerTileSizes = [4, 1], outerDimsPerm = [1, 0]}}>]> | |
#executable_target_embedded_elf_x86_64 = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>, max_stack_allocation_size = 32768 : i64, native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#map3 = affine_map<(d0, d1) -> (d0, d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64]> : !hal.device | |
#encoding2 = #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding3 = #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding4 = #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
stream.executable private @foo_dispatch_0 { | |
stream.executable.export public @foo_dispatch_0_matmul_DxDxD_f32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @foo_dispatch_0_matmul_DxDxD_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: index, %arg5: index, %arg6: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg5, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} | |
%5 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = stream.binding.subspan %arg6[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
%7 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} -> tensor<?x?xf32, #encoding2> | |
%8 = iree_tensor_ext.dispatch.tensor.load %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} -> tensor<?x?xf32, #encoding3> | |
%9 = tensor.empty(%0, %3) : tensor<?x?xf32> | |
%10 = tensor.empty(%0, %3) : tensor<?x?xf32, #encoding4> | |
%11 = linalg.fill ins(%cst : f32) outs(%10 : tensor<?x?xf32, #encoding4>) -> tensor<?x?xf32, #encoding4> | |
%12 = linalg.matmul ins(%7, %8 : tensor<?x?xf32, #encoding2>, tensor<?x?xf32, #encoding3>) outs(%11 : tensor<?x?xf32, #encoding4>) -> tensor<?x?xf32, #encoding4> | |
%13 = iree_encoding.unset_encoding %12 : tensor<?x?xf32, #encoding4> -> tensor<?x?xf32>{%0, %3} | |
%14 = linalg.generic {indexing_maps = [#map3, #map3], iterator_types = ["parallel", "parallel"]} ins(%13 : tensor<?x?xf32>) outs(%9 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%15 = arith.addf %in, %in : f32 | |
linalg.yield %15 : f32 | |
} -> tensor<?x?xf32> | |
iree_tensor_ext.dispatch.tensor.store %14, %6, offsets = [0, 0], sizes = [%0, %3], strides = [1, 1] : tensor<?x?xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
return | |
} | |
} | |
} | |
stream.executable private @_encoding_0 { | |
stream.executable.export public @_encoding_0_encode_DxDxf32_to_DxDxf32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @_encoding_0_encode_DxDxf32_to_DxDxf32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} | |
%5 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding>>{%2, %3} | |
%6 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} -> tensor<?x?xf32> | |
%7 = iree_encoding.set_encoding %6 : tensor<?x?xf32> -> tensor<?x?xf32, #encoding> | |
iree_tensor_ext.dispatch.tensor.store %7, %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : tensor<?x?xf32, #encoding> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding>>{%2, %3} | |
return | |
} | |
} | |
} | |
stream.executable private @_encoding_1 { | |
stream.executable.export public @_encoding_1_encode_DxDxf32_to_DxDxf32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @_encoding_1_encode_DxDxf32_to_DxDxf32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} | |
%5 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} -> tensor<?x?xf32> | |
%7 = iree_encoding.set_encoding %6 : tensor<?x?xf32> -> tensor<?x?xf32, #encoding1> | |
iree_tensor_ext.dispatch.tensor.store %7, %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : tensor<?x?xf32, #encoding1> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
return | |
} | |
} | |
} | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%c32 = arith.constant 32 : index | |
%c0 = arith.constant 0 : index | |
%c8 = arith.constant 8 : index | |
%c4 = arith.constant 4 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c4 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%5, %6]) type(%element_type_f32) encoding(%dense_row_major) | |
%7 = arith.muli %5, %c4 : index | |
%8 = arith.muli %7, %6 : index | |
%9 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%5, %6} in !stream.resource<external>{%8} | |
%10 = arith.ceildivsi %0, %c8 : index | |
%11 = arith.muli %10, %c32 : index | |
%12 = arith.muli %11, %1 : index | |
%13 = arith.ceildivsi %6, %c4 : index | |
%14 = arith.muli %13, %c4 : index | |
%15 = arith.muli %7, %14 : index | |
%16 = arith.muli %2, %6 : index | |
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%4 as %arg2: !stream.resource<external>{%3}, %9 as %arg3: !stream.resource<external>{%8}) -> !stream.resource<external>{%16} { | |
%19:2 = stream.async.concurrent with(%arg2 as %arg4: !stream.resource<external>{%3}, %arg3 as %arg5: !stream.resource<external>{%8}) -> (!stream.resource<transient>{%12}, !stream.resource<transient>{%15}) { | |
%21 = stream.async.dispatch @_encoding_0::@_encoding_0_encode_DxDxf32_to_DxDxf32[%0, %1, %0, %1](%arg4[%c0 to %3 for %3], %0, %1, %0, %1) : (!stream.resource<external>{%3}, index, index, index, index) -> !stream.resource<transient>{%12} | |
%22 = stream.async.dispatch @_encoding_1::@_encoding_1_encode_DxDxf32_to_DxDxf32[%5, %6, %5, %6](%arg5[%c0 to %8 for %8], %5, %6, %5, %6) : (!stream.resource<external>{%8}, index, index, index, index) -> !stream.resource<transient>{%15} | |
stream.yield %21, %22 : !stream.resource<transient>{%12}, !stream.resource<transient>{%15} | |
} | |
%20 = stream.async.dispatch @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %5, %6](%19#0[%c0 to %12 for %12], %19#1[%c0 to %15 for %15], %0, %1, %5, %6) : (!stream.resource<transient>{%12}, !stream.resource<transient>{%15}, index, index, index, index) -> !stream.resource<external>{%16} | |
stream.yield %20 : !stream.resource<external>{%16} | |
} => !stream.timepoint | |
%17 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%16} | |
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<?x?xf32>{%0, %6} in !stream.resource<external>{%16} -> !hal.buffer_view | |
util.return %18 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After VerifyLoweringToAsyncPass (iree-stream-verify-lowering-to-async) //----- // | |
#encoding = #iree_encoding.layout<[#iree_cpu.cpu_encoding_layout<configuration = {encoding_info = {innerDimsPos = [0, 1], innerTileSizes = [8, 1], outerDimsPerm = [0, 1]}}>]> | |
#encoding1 = #iree_encoding.layout<[#iree_cpu.cpu_encoding_layout<configuration = {encoding_info = {innerDimsPos = [1, 0], innerTileSizes = [4, 1], outerDimsPerm = [1, 0]}}>]> | |
#executable_target_embedded_elf_x86_64 = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>, max_stack_allocation_size = 32768 : i64, native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#map3 = affine_map<(d0, d1) -> (d0, d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64]> : !hal.device | |
#encoding2 = #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding3 = #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding4 = #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
stream.executable private @foo_dispatch_0 { | |
stream.executable.export public @foo_dispatch_0_matmul_DxDxD_f32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @foo_dispatch_0_matmul_DxDxD_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: index, %arg5: index, %arg6: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg5, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} | |
%5 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = stream.binding.subspan %arg6[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
%7 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} -> tensor<?x?xf32, #encoding2> | |
%8 = iree_tensor_ext.dispatch.tensor.load %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} -> tensor<?x?xf32, #encoding3> | |
%9 = tensor.empty(%0, %3) : tensor<?x?xf32> | |
%10 = tensor.empty(%0, %3) : tensor<?x?xf32, #encoding4> | |
%11 = linalg.fill ins(%cst : f32) outs(%10 : tensor<?x?xf32, #encoding4>) -> tensor<?x?xf32, #encoding4> | |
%12 = linalg.matmul ins(%7, %8 : tensor<?x?xf32, #encoding2>, tensor<?x?xf32, #encoding3>) outs(%11 : tensor<?x?xf32, #encoding4>) -> tensor<?x?xf32, #encoding4> | |
%13 = iree_encoding.unset_encoding %12 : tensor<?x?xf32, #encoding4> -> tensor<?x?xf32>{%0, %3} | |
%14 = linalg.generic {indexing_maps = [#map3, #map3], iterator_types = ["parallel", "parallel"]} ins(%13 : tensor<?x?xf32>) outs(%9 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%15 = arith.addf %in, %in : f32 | |
linalg.yield %15 : f32 | |
} -> tensor<?x?xf32> | |
iree_tensor_ext.dispatch.tensor.store %14, %6, offsets = [0, 0], sizes = [%0, %3], strides = [1, 1] : tensor<?x?xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
return | |
} | |
} | |
} | |
stream.executable private @_encoding_0 { | |
stream.executable.export public @_encoding_0_encode_DxDxf32_to_DxDxf32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @_encoding_0_encode_DxDxf32_to_DxDxf32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} | |
%5 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding>>{%2, %3} | |
%6 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} -> tensor<?x?xf32> | |
%7 = iree_encoding.set_encoding %6 : tensor<?x?xf32> -> tensor<?x?xf32, #encoding> | |
iree_tensor_ext.dispatch.tensor.store %7, %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : tensor<?x?xf32, #encoding> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding>>{%2, %3} | |
return | |
} | |
} | |
} | |
stream.executable private @_encoding_1 { | |
stream.executable.export public @_encoding_1_encode_DxDxf32_to_DxDxf32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @_encoding_1_encode_DxDxf32_to_DxDxf32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} | |
%5 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} -> tensor<?x?xf32> | |
%7 = iree_encoding.set_encoding %6 : tensor<?x?xf32> -> tensor<?x?xf32, #encoding1> | |
iree_tensor_ext.dispatch.tensor.store %7, %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : tensor<?x?xf32, #encoding1> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
return | |
} | |
} | |
} | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%c32 = arith.constant 32 : index | |
%c0 = arith.constant 0 : index | |
%c8 = arith.constant 8 : index | |
%c4 = arith.constant 4 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c4 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%5, %6]) type(%element_type_f32) encoding(%dense_row_major) | |
%7 = arith.muli %5, %c4 : index | |
%8 = arith.muli %7, %6 : index | |
%9 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%5, %6} in !stream.resource<external>{%8} | |
%10 = arith.ceildivsi %0, %c8 : index | |
%11 = arith.muli %10, %c32 : index | |
%12 = arith.muli %11, %1 : index | |
%13 = arith.ceildivsi %6, %c4 : index | |
%14 = arith.muli %13, %c4 : index | |
%15 = arith.muli %7, %14 : index | |
%16 = arith.muli %2, %6 : index | |
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%4 as %arg2: !stream.resource<external>{%3}, %9 as %arg3: !stream.resource<external>{%8}) -> !stream.resource<external>{%16} { | |
%19:2 = stream.async.concurrent with(%arg2 as %arg4: !stream.resource<external>{%3}, %arg3 as %arg5: !stream.resource<external>{%8}) -> (!stream.resource<transient>{%12}, !stream.resource<transient>{%15}) { | |
%21 = stream.async.dispatch @_encoding_0::@_encoding_0_encode_DxDxf32_to_DxDxf32[%0, %1, %0, %1](%arg4[%c0 to %3 for %3], %0, %1, %0, %1) : (!stream.resource<external>{%3}, index, index, index, index) -> !stream.resource<transient>{%12} | |
%22 = stream.async.dispatch @_encoding_1::@_encoding_1_encode_DxDxf32_to_DxDxf32[%5, %6, %5, %6](%arg5[%c0 to %8 for %8], %5, %6, %5, %6) : (!stream.resource<external>{%8}, index, index, index, index) -> !stream.resource<transient>{%15} | |
stream.yield %21, %22 : !stream.resource<transient>{%12}, !stream.resource<transient>{%15} | |
} | |
%20 = stream.async.dispatch @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %5, %6](%19#0[%c0 to %12 for %12], %19#1[%c0 to %15 for %15], %0, %1, %5, %6) : (!stream.resource<transient>{%12}, !stream.resource<transient>{%15}, index, index, index, index) -> !stream.resource<external>{%16} | |
stream.yield %20 : !stream.resource<external>{%16} | |
} => !stream.timepoint | |
%17 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%16} | |
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<?x?xf32>{%0, %6} in !stream.resource<external>{%16} -> !hal.buffer_view | |
util.return %18 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After ScheduleAllocationPass (iree-stream-schedule-allocation) //----- // | |
#encoding = #iree_encoding.layout<[#iree_cpu.cpu_encoding_layout<configuration = {encoding_info = {innerDimsPos = [0, 1], innerTileSizes = [8, 1], outerDimsPerm = [0, 1]}}>]> | |
#encoding1 = #iree_encoding.layout<[#iree_cpu.cpu_encoding_layout<configuration = {encoding_info = {innerDimsPos = [1, 0], innerTileSizes = [4, 1], outerDimsPerm = [1, 0]}}>]> | |
#executable_target_embedded_elf_x86_64 = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>, max_stack_allocation_size = 32768 : i64, native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#map3 = affine_map<(d0, d1) -> (d0, d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64]> : !hal.device | |
#encoding2 = #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding3 = #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding4 = #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
stream.executable private @foo_dispatch_0 { | |
stream.executable.export public @foo_dispatch_0_matmul_DxDxD_f32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @foo_dispatch_0_matmul_DxDxD_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: index, %arg5: index, %arg6: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg5, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} | |
%5 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = stream.binding.subspan %arg6[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
%7 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} -> tensor<?x?xf32, #encoding2> | |
%8 = iree_tensor_ext.dispatch.tensor.load %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} -> tensor<?x?xf32, #encoding3> | |
%9 = tensor.empty(%0, %3) : tensor<?x?xf32> | |
%10 = tensor.empty(%0, %3) : tensor<?x?xf32, #encoding4> | |
%11 = linalg.fill ins(%cst : f32) outs(%10 : tensor<?x?xf32, #encoding4>) -> tensor<?x?xf32, #encoding4> | |
%12 = linalg.matmul ins(%7, %8 : tensor<?x?xf32, #encoding2>, tensor<?x?xf32, #encoding3>) outs(%11 : tensor<?x?xf32, #encoding4>) -> tensor<?x?xf32, #encoding4> | |
%13 = iree_encoding.unset_encoding %12 : tensor<?x?xf32, #encoding4> -> tensor<?x?xf32>{%0, %3} | |
%14 = linalg.generic {indexing_maps = [#map3, #map3], iterator_types = ["parallel", "parallel"]} ins(%13 : tensor<?x?xf32>) outs(%9 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%15 = arith.addf %in, %in : f32 | |
linalg.yield %15 : f32 | |
} -> tensor<?x?xf32> | |
iree_tensor_ext.dispatch.tensor.store %14, %6, offsets = [0, 0], sizes = [%0, %3], strides = [1, 1] : tensor<?x?xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
return | |
} | |
} | |
} | |
stream.executable private @_encoding_0 { | |
stream.executable.export public @_encoding_0_encode_DxDxf32_to_DxDxf32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @_encoding_0_encode_DxDxf32_to_DxDxf32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} | |
%5 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding>>{%2, %3} | |
%6 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} -> tensor<?x?xf32> | |
%7 = iree_encoding.set_encoding %6 : tensor<?x?xf32> -> tensor<?x?xf32, #encoding> | |
iree_tensor_ext.dispatch.tensor.store %7, %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : tensor<?x?xf32, #encoding> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding>>{%2, %3} | |
return | |
} | |
} | |
} | |
stream.executable private @_encoding_1 { | |
stream.executable.export public @_encoding_1_encode_DxDxf32_to_DxDxf32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @_encoding_1_encode_DxDxf32_to_DxDxf32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} | |
%5 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} -> tensor<?x?xf32> | |
%7 = iree_encoding.set_encoding %6 : tensor<?x?xf32> -> tensor<?x?xf32, #encoding1> | |
iree_tensor_ext.dispatch.tensor.store %7, %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : tensor<?x?xf32, #encoding1> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
return | |
} | |
} | |
} | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%c32 = arith.constant 32 : index | |
%c0 = arith.constant 0 : index | |
%c8 = arith.constant 8 : index | |
%c4 = arith.constant 4 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c4 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%5, %6]) type(%element_type_f32) encoding(%dense_row_major) | |
%7 = arith.muli %5, %c4 : index | |
%8 = arith.muli %7, %6 : index | |
%9 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%5, %6} in !stream.resource<external>{%8} | |
%10 = arith.ceildivsi %0, %c8 : index | |
%11 = arith.muli %10, %c32 : index | |
%12 = arith.muli %11, %1 : index | |
%13 = arith.ceildivsi %6, %c4 : index | |
%14 = arith.muli %13, %c4 : index | |
%15 = arith.muli %7, %14 : index | |
%16 = arith.muli %2, %6 : index | |
%c0_0 = arith.constant 0 : index | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%16} => !stream.timepoint | |
%17:3 = stream.resource.pack on(#hal.device.affinity<@__device_0>) slices({ | |
[0, 1] = %12, | |
[0, 1] = %15 | |
}) : index | |
%result_1, %result_timepoint_2 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%17#0} => !stream.timepoint | |
%18 = stream.timepoint.join max(%result_timepoint, %result_timepoint_2) => !stream.timepoint | |
%19 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%18) => with(%4 as %arg2: !stream.resource<external>{%3}, %9 as %arg3: !stream.resource<external>{%8}, %result as %arg4: !stream.resource<external>{%16}, %result_1 as %arg5: !stream.resource<transient>{%17#0}) { | |
stream.cmd.concurrent { | |
stream.cmd.dispatch @_encoding_0::@_encoding_0_encode_DxDxf32_to_DxDxf32[%0, %1, %0, %1](%0, %1, %0, %1 : index, index, index, index) { | |
ro %arg2[%c0 for %3] : !stream.resource<external>{%3}, | |
wo %arg5[%17#1 for %12] : !stream.resource<transient>{%17#0} | |
} | |
stream.cmd.dispatch @_encoding_1::@_encoding_1_encode_DxDxf32_to_DxDxf32[%5, %6, %5, %6](%5, %6, %5, %6 : index, index, index, index) { | |
ro %arg3[%c0 for %8] : !stream.resource<external>{%8}, | |
wo %arg5[%17#2 for %15] : !stream.resource<transient>{%17#0} | |
} | |
} | |
stream.cmd.dispatch @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %5, %6](%0, %1, %5, %6 : index, index, index, index) { | |
ro %arg5[%17#1 for %12] : !stream.resource<transient>{%17#0}, | |
ro %arg5[%17#2 for %15] : !stream.resource<transient>{%17#0}, | |
wo %arg4[%c0_0 for %16] : !stream.resource<external>{%16} | |
} | |
} => !stream.timepoint | |
%20 = stream.resource.dealloca on(#hal.device.affinity<@__device_0>) await(%19) => %result_1 : !stream.resource<transient>{%17#0} => !stream.timepoint | |
%21 = stream.timepoint.join max(%20, %19) => !stream.timepoint | |
%22 = stream.timepoint.await %21 => %result : !stream.resource<external>{%16} | |
%23 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %22 : tensor<?x?xf32>{%0, %6} in !stream.resource<external>{%16} -> !hal.buffer_view | |
util.return %23 : !hal.buffer_view | |
} | |
} | |
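// Note on the ScheduleAllocationPass output above: the async ops are rewritten into an
// explicit stream.resource.alloca / stream.cmd.execute / stream.resource.dealloca
// sequence, and stream.resource.pack folds the two transient encode results into one
// allocation, yielding a total size plus one offset per slice. Both slices are live over
// the same [0, 1] interval, so they cannot alias and must be placed side by side; a slice
// whose lifetime did not overlap could reuse an earlier offset. A minimal Python sketch
// of that idea (illustrative only, not the pass's actual packing algorithm):

def pack(slices):
    """slices: (start, end, size) lifetime intervals -> (total_size, per-slice offsets)."""
    placed, offsets, total = [], [], 0
    for start, end, size in slices:
        # Reuse the storage of an earlier slice only if the lifetimes do not overlap.
        reuse = next((off for s, e, off, sz in placed
                      if (end < s or start > e) and sz >= size), None)
        off = total if reuse is None else reuse
        if reuse is None:
            total += size
        placed.append((start, end, off, size))
        offsets.append(off)
    return total, offsets

# Both encode results are live over [0, 1] as in the dump, so they get distinct offsets:
print(pack([(0, 1, 1000), (0, 1, 500)]))               # (1500, [0, 1000])
# A hypothetical third slice live only over [2, 2] could reuse offset 0:
print(pack([(0, 1, 1000), (0, 1, 500), (2, 2, 800)]))  # (1500, [0, 1000, 0])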
// -----// IR Dump After PackConstantsPass (iree-stream-pack-constants) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%c32 = arith.constant 32 : index | |
%c0 = arith.constant 0 : index | |
%c8 = arith.constant 8 : index | |
%c4 = arith.constant 4 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c4 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%5, %6]) type(%element_type_f32) encoding(%dense_row_major) | |
%7 = arith.muli %5, %c4 : index | |
%8 = arith.muli %7, %6 : index | |
%9 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%5, %6} in !stream.resource<external>{%8} | |
%10 = arith.ceildivsi %0, %c8 : index | |
%11 = arith.muli %10, %c32 : index | |
%12 = arith.muli %11, %1 : index | |
%13 = arith.ceildivsi %6, %c4 : index | |
%14 = arith.muli %13, %c4 : index | |
%15 = arith.muli %7, %14 : index | |
%16 = arith.muli %2, %6 : index | |
%c0_0 = arith.constant 0 : index | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%16} => !stream.timepoint | |
%17:3 = stream.resource.pack on(#hal.device.affinity<@__device_0>) slices({ | |
[0, 1] = %12, | |
[0, 1] = %15 | |
}) : index | |
%result_1, %result_timepoint_2 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%17#0} => !stream.timepoint | |
%18 = stream.timepoint.join max(%result_timepoint, %result_timepoint_2) => !stream.timepoint | |
%19 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%18) => with(%4 as %arg2: !stream.resource<external>{%3}, %9 as %arg3: !stream.resource<external>{%8}, %result as %arg4: !stream.resource<external>{%16}, %result_1 as %arg5: !stream.resource<transient>{%17#0}) { | |
stream.cmd.concurrent { | |
stream.cmd.dispatch @_encoding_0::@_encoding_0_encode_DxDxf32_to_DxDxf32[%0, %1, %0, %1](%0, %1, %0, %1 : index, index, index, index) { | |
ro %arg2[%c0 for %3] : !stream.resource<external>{%3}, | |
wo %arg5[%17#1 for %12] : !stream.resource<transient>{%17#0} | |
} | |
stream.cmd.dispatch @_encoding_1::@_encoding_1_encode_DxDxf32_to_DxDxf32[%5, %6, %5, %6](%5, %6, %5, %6 : index, index, index, index) { | |
ro %arg3[%c0 for %8] : !stream.resource<external>{%8}, | |
wo %arg5[%17#2 for %15] : !stream.resource<transient>{%17#0} | |
} | |
} | |
stream.cmd.dispatch @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %5, %6](%0, %1, %5, %6 : index, index, index, index) { | |
ro %arg5[%17#1 for %12] : !stream.resource<transient>{%17#0}, | |
ro %arg5[%17#2 for %15] : !stream.resource<transient>{%17#0}, | |
wo %arg4[%c0_0 for %16] : !stream.resource<external>{%16} | |
} | |
} => !stream.timepoint | |
%20 = stream.resource.dealloca on(#hal.device.affinity<@__device_0>) await(%19) => %result_1 : !stream.resource<transient>{%17#0} => !stream.timepoint | |
%21 = stream.timepoint.join max(%20, %19) => !stream.timepoint | |
%22 = stream.timepoint.await %21 => %result : !stream.resource<external>{%16} | |
%23 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %22 : tensor<?x?xf32>{%0, %6} in !stream.resource<external>{%16} -> !hal.buffer_view | |
util.return %23 : !hal.buffer_view | |
} | |
// -----// IR Dump After LayoutSlicesPass (iree-stream-layout-slices) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%c32 = arith.constant 32 : index | |
%c0 = arith.constant 0 : index | |
%c8 = arith.constant 8 : index | |
%c4 = arith.constant 4 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c4 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%5, %6]) type(%element_type_f32) encoding(%dense_row_major) | |
%7 = arith.muli %5, %c4 : index | |
%8 = arith.muli %7, %6 : index | |
%9 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%5, %6} in !stream.resource<external>{%8} | |
%10 = arith.ceildivsi %0, %c8 : index | |
%11 = arith.muli %10, %c32 : index | |
%12 = arith.muli %11, %1 : index | |
%13 = arith.ceildivsi %6, %c4 : index | |
%14 = arith.muli %13, %c4 : index | |
%15 = arith.muli %7, %14 : index | |
%16 = arith.muli %2, %6 : index | |
%c0_0 = arith.constant 0 : index | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%16} => !stream.timepoint | |
%c0_1 = arith.constant 0 : index | |
%c64 = arith.constant 64 : index | |
%17 = util.align %12, %c64 : index | |
%18 = arith.addi %17, %c0_1 : index | |
%c64_2 = arith.constant 64 : index | |
%c64_3 = arith.constant 64 : index | |
%19 = util.align %15, %c64_3 : index | |
%20 = arith.addi %18, %19 : index | |
%c64_4 = arith.constant 64 : index | |
%c64_5 = arith.constant 64 : index | |
%result_6, %result_timepoint_7 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%20} => !stream.timepoint | |
%21 = stream.timepoint.join max(%result_timepoint, %result_timepoint_7) => !stream.timepoint | |
%22 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%21) => with(%4 as %arg2: !stream.resource<external>{%3}, %9 as %arg3: !stream.resource<external>{%8}, %result as %arg4: !stream.resource<external>{%16}, %result_6 as %arg5: !stream.resource<transient>{%20}) { | |
stream.cmd.concurrent { | |
stream.cmd.dispatch @_encoding_0::@_encoding_0_encode_DxDxf32_to_DxDxf32[%0, %1, %0, %1](%0, %1, %0, %1 : index, index, index, index) { | |
ro %arg2[%c0 for %3] : !stream.resource<external>{%3}, | |
wo %arg5[%c0_1 for %12] : !stream.resource<transient>{%20} | |
} | |
stream.cmd.dispatch @_encoding_1::@_encoding_1_encode_DxDxf32_to_DxDxf32[%5, %6, %5, %6](%5, %6, %5, %6 : index, index, index, index) { | |
ro %arg3[%c0 for %8] : !stream.resource<external>{%8}, | |
wo %arg5[%18 for %15] : !stream.resource<transient>{%20} | |
} | |
} | |
stream.cmd.dispatch @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %5, %6](%0, %1, %5, %6 : index, index, index, index) { | |
ro %arg5[%c0_1 for %12] : !stream.resource<transient>{%20}, | |
ro %arg5[%18 for %15] : !stream.resource<transient>{%20}, | |
wo %arg4[%c0_0 for %16] : !stream.resource<external>{%16} | |
} | |
} => !stream.timepoint | |
%23 = stream.resource.dealloca on(#hal.device.affinity<@__device_0>) await(%22) => %result_6 : !stream.resource<transient>{%20} => !stream.timepoint | |
%24 = stream.timepoint.join max(%23, %22) => !stream.timepoint | |
%25 = stream.timepoint.await %24 => %result : !stream.resource<external>{%16} | |
%26 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %25 : tensor<?x?xf32>{%0, %6} in !stream.resource<external>{%16} -> !hal.buffer_view | |
util.return %26 : !hal.buffer_view | |
} | |
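// Note on the LayoutSlicesPass output above: the symbolic stream.resource.pack is lowered
// to concrete arithmetic. The first slice lands at offset 0, the second at
// util.align(%12, 64), and the transient allocation's total size is
// util.align(%12, 64) + util.align(%15, 64); the redundant %c64_* / %c0_* constants are
// folded away by the canonicalizer run that follows. A small Python check of that
// arithmetic (illustrative values only):

def align(x, a=64):
    # util.align: round x up to the next multiple of a
    return (x + a - 1) // a * a

def layout(size0, size1):
    offset0 = 0
    offset1 = align(size0)                  # %17 / %18 in the dump above
    total = align(size0) + align(size1)     # %20 in the dump above
    return offset0, offset1, total

print(layout(192, 96))   # (0, 192, 320): 192 is already 64-aligned, 96 rounds up to 128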
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%c64 = arith.constant 64 : index | |
%c32 = arith.constant 32 : index | |
%c0 = arith.constant 0 : index | |
%c8 = arith.constant 8 : index | |
%c4 = arith.constant 4 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c4 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%5, %6]) type(%element_type_f32) encoding(%dense_row_major) | |
%7 = arith.muli %5, %c4 : index | |
%8 = arith.muli %7, %6 : index | |
%9 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%5, %6} in !stream.resource<external>{%8} | |
%10 = arith.ceildivsi %0, %c8 : index | |
%11 = arith.muli %10, %c32 : index | |
%12 = arith.muli %11, %1 : index | |
%13 = arith.ceildivsi %6, %c4 : index | |
%14 = arith.muli %13, %c4 : index | |
%15 = arith.muli %7, %14 : index | |
%16 = arith.muli %2, %6 : index | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%16} => !stream.timepoint | |
%17 = util.align %12, %c64 : index | |
%18 = util.align %15, %c64 : index | |
%19 = arith.addi %17, %18 : index | |
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%19} => !stream.timepoint | |
%20 = stream.timepoint.join max(%result_timepoint, %result_timepoint_1) => !stream.timepoint | |
%21 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%20) => with(%4 as %arg2: !stream.resource<external>{%3}, %9 as %arg3: !stream.resource<external>{%8}, %result as %arg4: !stream.resource<external>{%16}, %result_0 as %arg5: !stream.resource<transient>{%19}) { | |
stream.cmd.concurrent { | |
stream.cmd.dispatch @_encoding_0::@_encoding_0_encode_DxDxf32_to_DxDxf32[%0, %1, %0, %1](%0, %1, %0, %1 : index, index, index, index) { | |
ro %arg2[%c0 for %3] : !stream.resource<external>{%3}, | |
wo %arg5[%c0 for %12] : !stream.resource<transient>{%19} | |
} | |
stream.cmd.dispatch @_encoding_1::@_encoding_1_encode_DxDxf32_to_DxDxf32[%5, %6, %5, %6](%5, %6, %5, %6 : index, index, index, index) { | |
ro %arg3[%c0 for %8] : !stream.resource<external>{%8}, | |
wo %arg5[%17 for %15] : !stream.resource<transient>{%19} | |
} | |
} | |
stream.cmd.dispatch @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %5, %6](%0, %1, %5, %6 : index, index, index, index) { | |
ro %arg5[%c0 for %12] : !stream.resource<transient>{%19}, | |
ro %arg5[%17 for %15] : !stream.resource<transient>{%19}, | |
wo %arg4[%c0 for %16] : !stream.resource<external>{%16} | |
} | |
} => !stream.timepoint | |
%22 = stream.resource.dealloca on(#hal.device.affinity<@__device_0>) await(%21) => %result_0 : !stream.resource<transient>{%19} => !stream.timepoint | |
%23 = stream.timepoint.join max(%22, %21) => !stream.timepoint | |
%24 = stream.timepoint.await %23 => %result : !stream.resource<external>{%16} | |
%25 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %24 : tensor<?x?xf32>{%0, %6} in !stream.resource<external>{%16} -> !hal.buffer_view | |
util.return %25 : !hal.buffer_view | |
} | |
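// The index arithmetic in @foo above sizes a single transient buffer holding both
// encoded (data-tiled) operands: %12 is the LHS byte size with M rounded up to its
// 8-element inner tile, %15 is the RHS byte size with N rounded up to its 4-element
// inner tile, %17 and %18 round each sub-buffer to a 64-byte boundary, and
// %19 = %17 + %18 is the size passed to stream.resource.alloca. A minimal Python
// sketch of that sizing (helper names are illustrative, not IREE APIs):

def ceildiv(a, b):
    return -(-a // b)                       # arith.ceildivsi for positive extents

def align_up(x, boundary):
    return ceildiv(x, boundary) * boundary  # util.align

def transient_size(M, K, N, elem_bytes=4, align_to=64):
    lhs_encoded = ceildiv(M, 8) * 8 * K * elem_bytes   # %12: M padded to the 8-wide tile
    rhs_encoded = K * elem_bytes * ceildiv(N, 4) * 4   # %15: N padded to the 4-wide tile
    return align_up(lhs_encoded, align_to) + align_up(rhs_encoded, align_to)  # %19

# For example, transient_size(100, 64, 30) == 26624 + 8192 == 34816 bytes.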
// -----// IR Dump After PropagateSubrangesPass (iree-util-propagate-subranges) //----- // | |
#encoding = #iree_encoding.layout<[#iree_cpu.cpu_encoding_layout<configuration = {encoding_info = {innerDimsPos = [0, 1], innerTileSizes = [8, 1], outerDimsPerm = [0, 1]}}>]> | |
#encoding1 = #iree_encoding.layout<[#iree_cpu.cpu_encoding_layout<configuration = {encoding_info = {innerDimsPos = [1, 0], innerTileSizes = [4, 1], outerDimsPerm = [1, 0]}}>]> | |
#executable_target_embedded_elf_x86_64 = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>, max_stack_allocation_size = 32768 : i64, native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#map3 = affine_map<(d0, d1) -> (d0, d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64]> : !hal.device | |
#encoding2 = #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding3 = #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding4 = #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
stream.executable private @foo_dispatch_0 { | |
stream.executable.export public @foo_dispatch_0_matmul_DxDxD_f32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @foo_dispatch_0_matmul_DxDxD_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: index, %arg5: index, %arg6: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg5, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} | |
%5 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = stream.binding.subspan %arg6[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
%7 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} -> tensor<?x?xf32, #encoding2> | |
%8 = iree_tensor_ext.dispatch.tensor.load %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} -> tensor<?x?xf32, #encoding3> | |
%9 = tensor.empty(%0, %3) : tensor<?x?xf32> | |
%10 = tensor.empty(%0, %3) : tensor<?x?xf32, #encoding4> | |
%11 = linalg.fill ins(%cst : f32) outs(%10 : tensor<?x?xf32, #encoding4>) -> tensor<?x?xf32, #encoding4> | |
%12 = linalg.matmul ins(%7, %8 : tensor<?x?xf32, #encoding2>, tensor<?x?xf32, #encoding3>) outs(%11 : tensor<?x?xf32, #encoding4>) -> tensor<?x?xf32, #encoding4> | |
%13 = iree_encoding.unset_encoding %12 : tensor<?x?xf32, #encoding4> -> tensor<?x?xf32>{%0, %3} | |
%14 = linalg.generic {indexing_maps = [#map3, #map3], iterator_types = ["parallel", "parallel"]} ins(%13 : tensor<?x?xf32>) outs(%9 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%15 = arith.addf %in, %in : f32 | |
linalg.yield %15 : f32 | |
} -> tensor<?x?xf32> | |
iree_tensor_ext.dispatch.tensor.store %14, %6, offsets = [0, 0], sizes = [%0, %3], strides = [1, 1] : tensor<?x?xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
return | |
} | |
} | |
} | |
stream.executable private @_encoding_0 { | |
stream.executable.export public @_encoding_0_encode_DxDxf32_to_DxDxf32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @_encoding_0_encode_DxDxf32_to_DxDxf32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} | |
%5 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding>>{%2, %3} | |
%6 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} -> tensor<?x?xf32> | |
%7 = iree_encoding.set_encoding %6 : tensor<?x?xf32> -> tensor<?x?xf32, #encoding> | |
iree_tensor_ext.dispatch.tensor.store %7, %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : tensor<?x?xf32, #encoding> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding>>{%2, %3} | |
return | |
} | |
} | |
} | |
stream.executable private @_encoding_1 { | |
stream.executable.export public @_encoding_1_encode_DxDxf32_to_DxDxf32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @_encoding_1_encode_DxDxf32_to_DxDxf32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} | |
%5 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} -> tensor<?x?xf32> | |
%7 = iree_encoding.set_encoding %6 : tensor<?x?xf32> -> tensor<?x?xf32, #encoding1> | |
iree_tensor_ext.dispatch.tensor.store %7, %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : tensor<?x?xf32, #encoding1> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
return | |
} | |
} | |
} | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%c64 = arith.constant 64 : index | |
%c32 = arith.constant 32 : index | |
%c0 = arith.constant 0 : index | |
%c8 = arith.constant 8 : index | |
%c4 = arith.constant 4 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c4 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%5, %6]) type(%element_type_f32) encoding(%dense_row_major) | |
%7 = arith.muli %5, %c4 : index | |
%8 = arith.muli %7, %6 : index | |
%9 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%5, %6} in !stream.resource<external>{%8} | |
%10 = arith.ceildivsi %0, %c8 : index | |
%11 = arith.muli %10, %c32 : index | |
%12 = arith.muli %11, %1 : index | |
%13 = arith.ceildivsi %6, %c4 : index | |
%14 = arith.muli %13, %c4 : index | |
%15 = arith.muli %7, %14 : index | |
%16 = arith.muli %2, %6 : index | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%16} => !stream.timepoint | |
%17 = util.align %12, %c64 : index | |
%18 = util.align %15, %c64 : index | |
%19 = arith.addi %17, %18 : index | |
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%19} => !stream.timepoint | |
%20 = stream.timepoint.join max(%result_timepoint, %result_timepoint_1) => !stream.timepoint | |
%21 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%20) => with(%4 as %arg2: !stream.resource<external>{%3}, %9 as %arg3: !stream.resource<external>{%8}, %result as %arg4: !stream.resource<external>{%16}, %result_0 as %arg5: !stream.resource<transient>{%19}) { | |
stream.cmd.concurrent { | |
stream.cmd.dispatch @_encoding_0::@_encoding_0_encode_DxDxf32_to_DxDxf32[%0, %1, %0, %1](%0, %1, %0, %1 : index, index, index, index) { | |
ro %arg2[%c0 for %3] : !stream.resource<external>{%3}, | |
wo %arg5[%c0 for %12] : !stream.resource<transient>{%19} | |
} | |
stream.cmd.dispatch @_encoding_1::@_encoding_1_encode_DxDxf32_to_DxDxf32[%5, %6, %5, %6](%5, %6, %5, %6 : index, index, index, index) { | |
ro %arg3[%c0 for %8] : !stream.resource<external>{%8}, | |
wo %arg5[%17 for %15] : !stream.resource<transient>{%19} | |
} | |
} | |
stream.cmd.dispatch @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %5, %6](%0, %1, %5, %6 : index, index, index, index) { | |
ro %arg5[%c0 for %12] : !stream.resource<transient>{%19}, | |
ro %arg5[%17 for %15] : !stream.resource<transient>{%19}, | |
wo %arg4[%c0 for %16] : !stream.resource<external>{%16} | |
} | |
} => !stream.timepoint | |
%22 = stream.resource.dealloca on(#hal.device.affinity<@__device_0>) await(%21) => %result_0 : !stream.resource<transient>{%19} => !stream.timepoint | |
%23 = stream.timepoint.join max(%22, %21) => !stream.timepoint | |
%24 = stream.timepoint.await %23 => %result : !stream.resource<external>{%16} | |
%25 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %24 : tensor<?x?xf32>{%0, %6} in !stream.resource<external>{%16} -> !hal.buffer_view | |
util.return %25 : !hal.buffer_view | |
} | |
} | |
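// The #encoding / #encoding1 layouts in the dump above describe the data-tiled form the
// encode dispatches produce: the (M, K) LHS is padded in M and split into 8x1 tiles
// (innerDimsPos = [0, 1], innerTileSizes = [8, 1]), and the (K, N) RHS is padded in N,
// transposed at the outer level and split into 4x1 tiles (innerDimsPos = [1, 0],
// innerTileSizes = [4, 1], outerDimsPerm = [1, 0]). A numpy sketch of that relayout,
// assuming tensor.pack-like semantics; the function names are illustrative only:

import numpy as np

def pack_lhs(a, tile_m=8):
    # (M, K) -> (ceil(M / 8), K, 8, 1), zero-padding M up to a multiple of the tile.
    M, K = a.shape
    a = np.pad(a, ((0, (-M) % tile_m), (0, 0)))
    return a.reshape(a.shape[0] // tile_m, tile_m, K).transpose(0, 2, 1)[..., None]

def pack_rhs(b, tile_n=4):
    # (K, N) -> (ceil(N / 4), K, 4, 1), zero-padding N up to a multiple of the tile.
    K, N = b.shape
    b = np.pad(b, ((0, 0), (0, (-N) % tile_n)))
    return b.reshape(K, b.shape[1] // tile_n, tile_n).transpose(1, 0, 2)[..., None]

# The padded element counts match the buffer sizes computed in @foo:
# pack_lhs(a).size * 4 == %12 and pack_rhs(b).size * 4 == %15.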
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%c64 = arith.constant 64 : index | |
%c32 = arith.constant 32 : index | |
%c0 = arith.constant 0 : index | |
%c8 = arith.constant 8 : index | |
%c4 = arith.constant 4 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c4 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%5, %6]) type(%element_type_f32) encoding(%dense_row_major) | |
%7 = arith.muli %5, %c4 : index | |
%8 = arith.muli %7, %6 : index | |
%9 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%5, %6} in !stream.resource<external>{%8} | |
%10 = arith.ceildivsi %0, %c8 : index | |
%11 = arith.muli %10, %c32 : index | |
%12 = arith.muli %11, %1 : index | |
%13 = arith.ceildivsi %6, %c4 : index | |
%14 = arith.muli %13, %c4 : index | |
%15 = arith.muli %7, %14 : index | |
%16 = arith.muli %2, %6 : index | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%16} => !stream.timepoint | |
%17 = util.align %12, %c64 : index | |
%18 = util.align %15, %c64 : index | |
%19 = arith.addi %17, %18 : index | |
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%19} => !stream.timepoint | |
%20 = stream.timepoint.join max(%result_timepoint, %result_timepoint_1) => !stream.timepoint | |
%21 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%20) => with(%4 as %arg2: !stream.resource<external>{%3}, %9 as %arg3: !stream.resource<external>{%8}, %result as %arg4: !stream.resource<external>{%16}, %result_0 as %arg5: !stream.resource<transient>{%19}) { | |
stream.cmd.concurrent { | |
stream.cmd.dispatch @_encoding_0::@_encoding_0_encode_DxDxf32_to_DxDxf32[%0, %1, %0, %1](%0, %1, %0, %1 : index, index, index, index) { | |
ro %arg2[%c0 for %3] : !stream.resource<external>{%3}, | |
wo %arg5[%c0 for %12] : !stream.resource<transient>{%19} | |
} | |
stream.cmd.dispatch @_encoding_1::@_encoding_1_encode_DxDxf32_to_DxDxf32[%5, %6, %5, %6](%5, %6, %5, %6 : index, index, index, index) { | |
ro %arg3[%c0 for %8] : !stream.resource<external>{%8}, | |
wo %arg5[%17 for %15] : !stream.resource<transient>{%19} | |
} | |
} | |
stream.cmd.dispatch @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %5, %6](%0, %1, %5, %6 : index, index, index, index) { | |
ro %arg5[%c0 for %12] : !stream.resource<transient>{%19}, | |
ro %arg5[%17 for %15] : !stream.resource<transient>{%19}, | |
wo %arg4[%c0 for %16] : !stream.resource<external>{%16} | |
} | |
} => !stream.timepoint | |
%22 = stream.resource.dealloca on(#hal.device.affinity<@__device_0>) await(%21) => %result_0 : !stream.resource<transient>{%19} => !stream.timepoint | |
%23 = stream.timepoint.join max(%22, %21) => !stream.timepoint | |
%24 = stream.timepoint.await %23 => %result : !stream.resource<external>{%16} | |
%25 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %24 : tensor<?x?xf32>{%0, %6} in !stream.resource<external>{%16} -> !hal.buffer_view | |
util.return %25 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%c64 = arith.constant 64 : index | |
%c32 = arith.constant 32 : index | |
%c0 = arith.constant 0 : index | |
%c8 = arith.constant 8 : index | |
%c4 = arith.constant 4 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c4 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%5, %6]) type(%element_type_f32) encoding(%dense_row_major) | |
%7 = arith.muli %5, %c4 : index | |
%8 = arith.muli %7, %6 : index | |
%9 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%5, %6} in !stream.resource<external>{%8} | |
%10 = arith.ceildivsi %0, %c8 : index | |
%11 = arith.muli %10, %c32 : index | |
%12 = arith.muli %11, %1 : index | |
%13 = arith.ceildivsi %6, %c4 : index | |
%14 = arith.muli %13, %c4 : index | |
%15 = arith.muli %7, %14 : index | |
%16 = arith.muli %2, %6 : index | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%16} => !stream.timepoint | |
%17 = util.align %12, %c64 : index | |
%18 = util.align %15, %c64 : index | |
%19 = arith.addi %17, %18 : index | |
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%19} => !stream.timepoint | |
%20 = stream.timepoint.join max(%result_timepoint, %result_timepoint_1) => !stream.timepoint | |
%21 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%20) => with(%4 as %arg2: !stream.resource<external>{%3}, %9 as %arg3: !stream.resource<external>{%8}, %result as %arg4: !stream.resource<external>{%16}, %result_0 as %arg5: !stream.resource<transient>{%19}) { | |
stream.cmd.concurrent { | |
stream.cmd.dispatch @_encoding_0::@_encoding_0_encode_DxDxf32_to_DxDxf32[%0, %1, %0, %1](%0, %1, %0, %1 : index, index, index, index) { | |
ro %arg2[%c0 for %3] : !stream.resource<external>{%3}, | |
wo %arg5[%c0 for %12] : !stream.resource<transient>{%19} | |
} | |
stream.cmd.dispatch @_encoding_1::@_encoding_1_encode_DxDxf32_to_DxDxf32[%5, %6, %5, %6](%5, %6, %5, %6 : index, index, index, index) { | |
ro %arg3[%c0 for %8] : !stream.resource<external>{%8}, | |
wo %arg5[%17 for %15] : !stream.resource<transient>{%19} | |
} | |
} | |
stream.cmd.dispatch @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %5, %6](%0, %1, %5, %6 : index, index, index, index) { | |
ro %arg5[%c0 for %12] : !stream.resource<transient>{%19}, | |
ro %arg5[%17 for %15] : !stream.resource<transient>{%19}, | |
wo %arg4[%c0 for %16] : !stream.resource<external>{%16} | |
} | |
} => !stream.timepoint | |
%22 = stream.resource.dealloca on(#hal.device.affinity<@__device_0>) await(%21) => %result_0 : !stream.resource<transient>{%19} => !stream.timepoint | |
%23 = stream.timepoint.join max(%22, %21) => !stream.timepoint | |
%24 = stream.timepoint.await %23 => %result : !stream.resource<external>{%16} | |
%25 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %24 : tensor<?x?xf32>{%0, %6} in !stream.resource<external>{%16} -> !hal.buffer_view | |
util.return %25 : !hal.buffer_view | |
} | |
// -----// IR Dump After OptimizeIntArithmeticPass (iree-util-optimize-int-arithmetic) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%c64 = arith.constant 64 : index | |
%c32 = arith.constant 32 : index | |
%c0 = arith.constant 0 : index | |
%c8 = arith.constant 8 : index | |
%c4 = arith.constant 4 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c4 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%5, %6]) type(%element_type_f32) encoding(%dense_row_major) | |
%7 = arith.muli %5, %c4 : index | |
%8 = arith.muli %7, %6 : index | |
%9 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%5, %6} in !stream.resource<external>{%8} | |
%10 = arith.ceildivsi %0, %c8 : index | |
%11 = arith.muli %10, %c32 : index | |
%12 = arith.muli %11, %1 : index | |
%13 = arith.ceildivsi %6, %c4 : index | |
%14 = arith.muli %13, %c4 : index | |
%15 = arith.muli %7, %14 : index | |
%16 = arith.muli %2, %6 : index | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%16} => !stream.timepoint | |
%17 = util.align %12, %c64 : index | |
%18 = util.align %15, %c64 : index | |
%19 = arith.addi %17, %18 : index | |
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%19} => !stream.timepoint | |
%20 = stream.timepoint.join max(%result_timepoint, %result_timepoint_1) => !stream.timepoint | |
%21 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%20) => with(%4 as %arg2: !stream.resource<external>{%3}, %9 as %arg3: !stream.resource<external>{%8}, %result as %arg4: !stream.resource<external>{%16}, %result_0 as %arg5: !stream.resource<transient>{%19}) { | |
stream.cmd.concurrent { | |
stream.cmd.dispatch @_encoding_0::@_encoding_0_encode_DxDxf32_to_DxDxf32[%0, %1, %0, %1](%0, %1, %0, %1 : index, index, index, index) { | |
ro %arg2[%c0 for %3] : !stream.resource<external>{%3}, | |
wo %arg5[%c0 for %12] : !stream.resource<transient>{%19} | |
} | |
stream.cmd.dispatch @_encoding_1::@_encoding_1_encode_DxDxf32_to_DxDxf32[%5, %6, %5, %6](%5, %6, %5, %6 : index, index, index, index) { | |
ro %arg3[%c0 for %8] : !stream.resource<external>{%8}, | |
wo %arg5[%17 for %15] : !stream.resource<transient>{%19} | |
} | |
} | |
stream.cmd.dispatch @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %5, %6](%0, %1, %5, %6 : index, index, index, index) { | |
ro %arg5[%c0 for %12] : !stream.resource<transient>{%19}, | |
ro %arg5[%17 for %15] : !stream.resource<transient>{%19}, | |
wo %arg4[%c0 for %16] : !stream.resource<external>{%16} | |
} | |
} => !stream.timepoint | |
%22 = stream.resource.dealloca on(#hal.device.affinity<@__device_0>) await(%21) => %result_0 : !stream.resource<transient>{%19} => !stream.timepoint | |
%23 = stream.timepoint.join max(%22, %21) => !stream.timepoint | |
%24 = stream.timepoint.await %23 => %result : !stream.resource<external>{%16} | |
%25 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %24 : tensor<?x?xf32>{%0, %6} in !stream.resource<external>{%16} -> !hal.buffer_view | |
util.return %25 : !hal.buffer_view | |
} | |
// -----// IR Dump After SimplifyGlobalAccessesPass (iree-util-simplify-global-accesses) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%c64 = arith.constant 64 : index | |
%c32 = arith.constant 32 : index | |
%c0 = arith.constant 0 : index | |
%c8 = arith.constant 8 : index | |
%c4 = arith.constant 4 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c4 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%5, %6]) type(%element_type_f32) encoding(%dense_row_major) | |
%7 = arith.muli %5, %c4 : index | |
%8 = arith.muli %7, %6 : index | |
%9 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%5, %6} in !stream.resource<external>{%8} | |
%10 = arith.ceildivsi %0, %c8 : index | |
%11 = arith.muli %10, %c32 : index | |
%12 = arith.muli %11, %1 : index | |
%13 = arith.ceildivsi %6, %c4 : index | |
%14 = arith.muli %13, %c4 : index | |
%15 = arith.muli %7, %14 : index | |
%16 = arith.muli %2, %6 : index | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%16} => !stream.timepoint | |
%17 = util.align %12, %c64 : index | |
%18 = util.align %15, %c64 : index | |
%19 = arith.addi %17, %18 : index | |
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%19} => !stream.timepoint | |
%20 = stream.timepoint.join max(%result_timepoint, %result_timepoint_1) => !stream.timepoint | |
%21 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%20) => with(%4 as %arg2: !stream.resource<external>{%3}, %9 as %arg3: !stream.resource<external>{%8}, %result as %arg4: !stream.resource<external>{%16}, %result_0 as %arg5: !stream.resource<transient>{%19}) { | |
stream.cmd.concurrent { | |
stream.cmd.dispatch @_encoding_0::@_encoding_0_encode_DxDxf32_to_DxDxf32[%0, %1, %0, %1](%0, %1, %0, %1 : index, index, index, index) { | |
ro %arg2[%c0 for %3] : !stream.resource<external>{%3}, | |
wo %arg5[%c0 for %12] : !stream.resource<transient>{%19} | |
} | |
stream.cmd.dispatch @_encoding_1::@_encoding_1_encode_DxDxf32_to_DxDxf32[%5, %6, %5, %6](%5, %6, %5, %6 : index, index, index, index) { | |
ro %arg3[%c0 for %8] : !stream.resource<external>{%8}, | |
wo %arg5[%17 for %15] : !stream.resource<transient>{%19} | |
} | |
} | |
stream.cmd.dispatch @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %5, %6](%0, %1, %5, %6 : index, index, index, index) { | |
ro %arg5[%c0 for %12] : !stream.resource<transient>{%19}, | |
ro %arg5[%17 for %15] : !stream.resource<transient>{%19}, | |
wo %arg4[%c0 for %16] : !stream.resource<external>{%16} | |
} | |
} => !stream.timepoint | |
%22 = stream.resource.dealloca on(#hal.device.affinity<@__device_0>) await(%21) => %result_0 : !stream.resource<transient>{%19} => !stream.timepoint | |
%23 = stream.timepoint.join max(%22, %21) => !stream.timepoint | |
%24 = stream.timepoint.await %23 => %result : !stream.resource<external>{%16} | |
%25 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %24 : tensor<?x?xf32>{%0, %6} in !stream.resource<external>{%16} -> !hal.buffer_view | |
util.return %25 : !hal.buffer_view | |
} | |
// -----// IR Dump After ApplyPatternsPass (iree-util-apply-patterns) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%c64 = arith.constant 64 : index | |
%c32 = arith.constant 32 : index | |
%c0 = arith.constant 0 : index | |
%c8 = arith.constant 8 : index | |
%c4 = arith.constant 4 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c4 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%5, %6]) type(%element_type_f32) encoding(%dense_row_major) | |
%7 = arith.muli %5, %c4 : index | |
%8 = arith.muli %7, %6 : index | |
%9 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%5, %6} in !stream.resource<external>{%8} | |
%10 = arith.ceildivsi %0, %c8 : index | |
%11 = arith.muli %10, %c32 : index | |
%12 = arith.muli %11, %1 : index | |
%13 = arith.ceildivsi %6, %c4 : index | |
%14 = arith.muli %13, %c4 : index | |
%15 = arith.muli %7, %14 : index | |
%16 = arith.muli %2, %6 : index | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%16} => !stream.timepoint | |
%17 = util.align %12, %c64 : index | |
%18 = util.align %15, %c64 : index | |
%19 = arith.addi %17, %18 : index | |
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%19} => !stream.timepoint | |
%20 = stream.timepoint.join max(%result_timepoint, %result_timepoint_1) => !stream.timepoint | |
%21 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%20) => with(%4 as %arg2: !stream.resource<external>{%3}, %9 as %arg3: !stream.resource<external>{%8}, %result as %arg4: !stream.resource<external>{%16}, %result_0 as %arg5: !stream.resource<transient>{%19}) { | |
stream.cmd.concurrent { | |
stream.cmd.dispatch @_encoding_0::@_encoding_0_encode_DxDxf32_to_DxDxf32[%0, %1, %0, %1](%0, %1, %0, %1 : index, index, index, index) { | |
ro %arg2[%c0 for %3] : !stream.resource<external>{%3}, | |
wo %arg5[%c0 for %12] : !stream.resource<transient>{%19} | |
} | |
stream.cmd.dispatch @_encoding_1::@_encoding_1_encode_DxDxf32_to_DxDxf32[%5, %6, %5, %6](%5, %6, %5, %6 : index, index, index, index) { | |
ro %arg3[%c0 for %8] : !stream.resource<external>{%8}, | |
wo %arg5[%17 for %15] : !stream.resource<transient>{%19} | |
} | |
} | |
stream.cmd.dispatch @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %5, %6](%0, %1, %5, %6 : index, index, index, index) { | |
ro %arg5[%c0 for %12] : !stream.resource<transient>{%19}, | |
ro %arg5[%17 for %15] : !stream.resource<transient>{%19}, | |
wo %arg4[%c0 for %16] : !stream.resource<external>{%16} | |
} | |
} => !stream.timepoint | |
%22 = stream.resource.dealloca on(#hal.device.affinity<@__device_0>) await(%21) => %result_0 : !stream.resource<transient>{%19} => !stream.timepoint | |
%23 = stream.timepoint.join max(%22, %21) => !stream.timepoint | |
%24 = stream.timepoint.await %23 => %result : !stream.resource<external>{%16} | |
%25 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %24 : tensor<?x?xf32>{%0, %6} in !stream.resource<external>{%16} -> !hal.buffer_view | |
util.return %25 : !hal.buffer_view | |
} | |
// -----// IR Dump After FoldGlobalsPass (iree-util-fold-globals) //----- // | |
#encoding = #iree_encoding.layout<[#iree_cpu.cpu_encoding_layout<configuration = {encoding_info = {innerDimsPos = [0, 1], innerTileSizes = [8, 1], outerDimsPerm = [0, 1]}}>]> | |
#encoding1 = #iree_encoding.layout<[#iree_cpu.cpu_encoding_layout<configuration = {encoding_info = {innerDimsPos = [1, 0], innerTileSizes = [4, 1], outerDimsPerm = [1, 0]}}>]> | |
#executable_target_embedded_elf_x86_64 = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>, max_stack_allocation_size = 32768 : i64, native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#map3 = affine_map<(d0, d1) -> (d0, d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64]> : !hal.device | |
#encoding2 = #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding3 = #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding4 = #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
stream.executable private @foo_dispatch_0 { | |
stream.executable.export public @foo_dispatch_0_matmul_DxDxD_f32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @foo_dispatch_0_matmul_DxDxD_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: index, %arg5: index, %arg6: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg5, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} | |
%5 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = stream.binding.subspan %arg6[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
%7 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} -> tensor<?x?xf32, #encoding2> | |
%8 = iree_tensor_ext.dispatch.tensor.load %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} -> tensor<?x?xf32, #encoding3> | |
%9 = tensor.empty(%0, %3) : tensor<?x?xf32> | |
%10 = tensor.empty(%0, %3) : tensor<?x?xf32, #encoding4> | |
%11 = linalg.fill ins(%cst : f32) outs(%10 : tensor<?x?xf32, #encoding4>) -> tensor<?x?xf32, #encoding4> | |
%12 = linalg.matmul ins(%7, %8 : tensor<?x?xf32, #encoding2>, tensor<?x?xf32, #encoding3>) outs(%11 : tensor<?x?xf32, #encoding4>) -> tensor<?x?xf32, #encoding4> | |
%13 = iree_encoding.unset_encoding %12 : tensor<?x?xf32, #encoding4> -> tensor<?x?xf32>{%0, %3} | |
%14 = linalg.generic {indexing_maps = [#map3, #map3], iterator_types = ["parallel", "parallel"]} ins(%13 : tensor<?x?xf32>) outs(%9 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%15 = arith.addf %in, %in : f32 | |
linalg.yield %15 : f32 | |
} -> tensor<?x?xf32> | |
iree_tensor_ext.dispatch.tensor.store %14, %6, offsets = [0, 0], sizes = [%0, %3], strides = [1, 1] : tensor<?x?xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
return | |
} | |
} | |
} | |
stream.executable private @_encoding_0 { | |
stream.executable.export public @_encoding_0_encode_DxDxf32_to_DxDxf32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @_encoding_0_encode_DxDxf32_to_DxDxf32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} | |
%5 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding>>{%2, %3} | |
%6 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} -> tensor<?x?xf32> | |
%7 = iree_encoding.set_encoding %6 : tensor<?x?xf32> -> tensor<?x?xf32, #encoding> | |
iree_tensor_ext.dispatch.tensor.store %7, %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : tensor<?x?xf32, #encoding> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding>>{%2, %3} | |
return | |
} | |
} | |
} | |
stream.executable private @_encoding_1 { | |
stream.executable.export public @_encoding_1_encode_DxDxf32_to_DxDxf32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @_encoding_1_encode_DxDxf32_to_DxDxf32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} | |
%5 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} -> tensor<?x?xf32> | |
%7 = iree_encoding.set_encoding %6 : tensor<?x?xf32> -> tensor<?x?xf32, #encoding1> | |
iree_tensor_ext.dispatch.tensor.store %7, %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : tensor<?x?xf32, #encoding1> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
return | |
} | |
} | |
} | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%c64 = arith.constant 64 : index | |
%c32 = arith.constant 32 : index | |
%c0 = arith.constant 0 : index | |
%c8 = arith.constant 8 : index | |
%c4 = arith.constant 4 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c4 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%5, %6]) type(%element_type_f32) encoding(%dense_row_major) | |
%7 = arith.muli %5, %c4 : index | |
%8 = arith.muli %7, %6 : index | |
%9 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%5, %6} in !stream.resource<external>{%8} | |
%10 = arith.ceildivsi %0, %c8 : index | |
%11 = arith.muli %10, %c32 : index | |
%12 = arith.muli %11, %1 : index | |
%13 = arith.ceildivsi %6, %c4 : index | |
%14 = arith.muli %13, %c4 : index | |
%15 = arith.muli %7, %14 : index | |
%16 = arith.muli %2, %6 : index | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%16} => !stream.timepoint | |
%17 = util.align %12, %c64 : index | |
%18 = util.align %15, %c64 : index | |
%19 = arith.addi %17, %18 : index | |
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%19} => !stream.timepoint | |
%20 = stream.timepoint.join max(%result_timepoint, %result_timepoint_1) => !stream.timepoint | |
%21 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%20) => with(%4 as %arg2: !stream.resource<external>{%3}, %9 as %arg3: !stream.resource<external>{%8}, %result as %arg4: !stream.resource<external>{%16}, %result_0 as %arg5: !stream.resource<transient>{%19}) { | |
stream.cmd.concurrent { | |
stream.cmd.dispatch @_encoding_0::@_encoding_0_encode_DxDxf32_to_DxDxf32[%0, %1, %0, %1](%0, %1, %0, %1 : index, index, index, index) { | |
ro %arg2[%c0 for %3] : !stream.resource<external>{%3}, | |
wo %arg5[%c0 for %12] : !stream.resource<transient>{%19} | |
} | |
stream.cmd.dispatch @_encoding_1::@_encoding_1_encode_DxDxf32_to_DxDxf32[%5, %6, %5, %6](%5, %6, %5, %6 : index, index, index, index) { | |
ro %arg3[%c0 for %8] : !stream.resource<external>{%8}, | |
wo %arg5[%17 for %15] : !stream.resource<transient>{%19} | |
} | |
} | |
stream.cmd.dispatch @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %5, %6](%0, %1, %5, %6 : index, index, index, index) { | |
ro %arg5[%c0 for %12] : !stream.resource<transient>{%19}, | |
ro %arg5[%17 for %15] : !stream.resource<transient>{%19}, | |
wo %arg4[%c0 for %16] : !stream.resource<external>{%16} | |
} | |
} => !stream.timepoint | |
%22 = stream.resource.dealloca on(#hal.device.affinity<@__device_0>) await(%21) => %result_0 : !stream.resource<transient>{%19} => !stream.timepoint | |
%23 = stream.timepoint.join max(%22, %21) => !stream.timepoint | |
%24 = stream.timepoint.await %23 => %result : !stream.resource<external>{%16} | |
%25 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %24 : tensor<?x?xf32>{%0, %6} in !stream.resource<external>{%16} -> !hal.buffer_view | |
util.return %25 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After FuseGlobalsPass (iree-util-fuse-globals) //----- // | |
#encoding = #iree_encoding.layout<[#iree_cpu.cpu_encoding_layout<configuration = {encoding_info = {innerDimsPos = [0, 1], innerTileSizes = [8, 1], outerDimsPerm = [0, 1]}}>]> | |
#encoding1 = #iree_encoding.layout<[#iree_cpu.cpu_encoding_layout<configuration = {encoding_info = {innerDimsPos = [1, 0], innerTileSizes = [4, 1], outerDimsPerm = [1, 0]}}>]> | |
#executable_target_embedded_elf_x86_64 = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>, max_stack_allocation_size = 32768 : i64, native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#map3 = affine_map<(d0, d1) -> (d0, d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64]> : !hal.device | |
#encoding2 = #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding3 = #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding4 = #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
stream.executable private @foo_dispatch_0 { | |
stream.executable.export public @foo_dispatch_0_matmul_DxDxD_f32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @foo_dispatch_0_matmul_DxDxD_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: index, %arg5: index, %arg6: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg5, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} | |
%5 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = stream.binding.subspan %arg6[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
%7 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} -> tensor<?x?xf32, #encoding2> | |
%8 = iree_tensor_ext.dispatch.tensor.load %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} -> tensor<?x?xf32, #encoding3> | |
%9 = tensor.empty(%0, %3) : tensor<?x?xf32> | |
%10 = tensor.empty(%0, %3) : tensor<?x?xf32, #encoding4> | |
%11 = linalg.fill ins(%cst : f32) outs(%10 : tensor<?x?xf32, #encoding4>) -> tensor<?x?xf32, #encoding4> | |
%12 = linalg.matmul ins(%7, %8 : tensor<?x?xf32, #encoding2>, tensor<?x?xf32, #encoding3>) outs(%11 : tensor<?x?xf32, #encoding4>) -> tensor<?x?xf32, #encoding4> | |
%13 = iree_encoding.unset_encoding %12 : tensor<?x?xf32, #encoding4> -> tensor<?x?xf32>{%0, %3} | |
%14 = linalg.generic {indexing_maps = [#map3, #map3], iterator_types = ["parallel", "parallel"]} ins(%13 : tensor<?x?xf32>) outs(%9 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%15 = arith.addf %in, %in : f32 | |
linalg.yield %15 : f32 | |
} -> tensor<?x?xf32> | |
iree_tensor_ext.dispatch.tensor.store %14, %6, offsets = [0, 0], sizes = [%0, %3], strides = [1, 1] : tensor<?x?xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
return | |
} | |
} | |
} | |
stream.executable private @_encoding_0 { | |
stream.executable.export public @_encoding_0_encode_DxDxf32_to_DxDxf32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @_encoding_0_encode_DxDxf32_to_DxDxf32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} | |
%5 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding>>{%2, %3} | |
%6 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} -> tensor<?x?xf32> | |
%7 = iree_encoding.set_encoding %6 : tensor<?x?xf32> -> tensor<?x?xf32, #encoding> | |
iree_tensor_ext.dispatch.tensor.store %7, %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : tensor<?x?xf32, #encoding> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding>>{%2, %3} | |
return | |
} | |
} | |
} | |
stream.executable private @_encoding_1 { | |
stream.executable.export public @_encoding_1_encode_DxDxf32_to_DxDxf32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @_encoding_1_encode_DxDxf32_to_DxDxf32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} | |
%5 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} -> tensor<?x?xf32> | |
%7 = iree_encoding.set_encoding %6 : tensor<?x?xf32> -> tensor<?x?xf32, #encoding1> | |
iree_tensor_ext.dispatch.tensor.store %7, %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : tensor<?x?xf32, #encoding1> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
return | |
} | |
} | |
} | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%c64 = arith.constant 64 : index | |
%c32 = arith.constant 32 : index | |
%c0 = arith.constant 0 : index | |
%c8 = arith.constant 8 : index | |
%c4 = arith.constant 4 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c4 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%5, %6]) type(%element_type_f32) encoding(%dense_row_major) | |
%7 = arith.muli %5, %c4 : index | |
%8 = arith.muli %7, %6 : index | |
%9 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%5, %6} in !stream.resource<external>{%8} | |
%10 = arith.ceildivsi %0, %c8 : index | |
%11 = arith.muli %10, %c32 : index | |
%12 = arith.muli %11, %1 : index | |
%13 = arith.ceildivsi %6, %c4 : index | |
%14 = arith.muli %13, %c4 : index | |
%15 = arith.muli %7, %14 : index | |
%16 = arith.muli %2, %6 : index | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%16} => !stream.timepoint | |
%17 = util.align %12, %c64 : index | |
%18 = util.align %15, %c64 : index | |
%19 = arith.addi %17, %18 : index | |
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%19} => !stream.timepoint | |
%20 = stream.timepoint.join max(%result_timepoint, %result_timepoint_1) => !stream.timepoint | |
%21 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%20) => with(%4 as %arg2: !stream.resource<external>{%3}, %9 as %arg3: !stream.resource<external>{%8}, %result as %arg4: !stream.resource<external>{%16}, %result_0 as %arg5: !stream.resource<transient>{%19}) { | |
stream.cmd.concurrent { | |
stream.cmd.dispatch @_encoding_0::@_encoding_0_encode_DxDxf32_to_DxDxf32[%0, %1, %0, %1](%0, %1, %0, %1 : index, index, index, index) { | |
ro %arg2[%c0 for %3] : !stream.resource<external>{%3}, | |
wo %arg5[%c0 for %12] : !stream.resource<transient>{%19} | |
} | |
stream.cmd.dispatch @_encoding_1::@_encoding_1_encode_DxDxf32_to_DxDxf32[%5, %6, %5, %6](%5, %6, %5, %6 : index, index, index, index) { | |
ro %arg3[%c0 for %8] : !stream.resource<external>{%8}, | |
wo %arg5[%17 for %15] : !stream.resource<transient>{%19} | |
} | |
} | |
stream.cmd.dispatch @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %5, %6](%0, %1, %5, %6 : index, index, index, index) { | |
ro %arg5[%c0 for %12] : !stream.resource<transient>{%19}, | |
ro %arg5[%17 for %15] : !stream.resource<transient>{%19}, | |
wo %arg4[%c0 for %16] : !stream.resource<external>{%16} | |
} | |
} => !stream.timepoint | |
%22 = stream.resource.dealloca on(#hal.device.affinity<@__device_0>) await(%21) => %result_0 : !stream.resource<transient>{%19} => !stream.timepoint | |
%23 = stream.timepoint.join max(%22, %21) => !stream.timepoint | |
%24 = stream.timepoint.await %23 => %result : !stream.resource<external>{%16} | |
%25 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %24 : tensor<?x?xf32>{%0, %6} in !stream.resource<external>{%16} -> !hal.buffer_view | |
util.return %25 : !hal.buffer_view | |
} | |
} | |
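// In every module dump above, @foo_dispatch_0_matmul_DxDxD_f32 performs the f32 matmul
// and its trailing linalg.generic doubles the result elementwise (addf %in, %in), so the
// exported @foo is equivalent to 2 * (input0 @ input1). A plain-numpy reference of that
// behavior, ignoring the data-tiling encodings (the name foo_reference is illustrative):

import numpy as np

def foo_reference(a, b):
    # a: (M, K) f32, b: (K, N) f32 -> 2 * (a @ b), matching matmul + addf(%in, %in).
    return 2.0 * (a.astype(np.float32) @ b.astype(np.float32))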
// -----// IR Dump After IPOPass (iree-util-ipo) //----- // | |
#encoding = #iree_encoding.layout<[#iree_cpu.cpu_encoding_layout<configuration = {encoding_info = {innerDimsPos = [0, 1], innerTileSizes = [8, 1], outerDimsPerm = [0, 1]}}>]> | |
#encoding1 = #iree_encoding.layout<[#iree_cpu.cpu_encoding_layout<configuration = {encoding_info = {innerDimsPos = [1, 0], innerTileSizes = [4, 1], outerDimsPerm = [1, 0]}}>]> | |
#executable_target_embedded_elf_x86_64 = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>, max_stack_allocation_size = 32768 : i64, native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#map3 = affine_map<(d0, d1) -> (d0, d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64]> : !hal.device | |
#encoding2 = #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding3 = #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding4 = #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
stream.executable private @foo_dispatch_0 { | |
stream.executable.export public @foo_dispatch_0_matmul_DxDxD_f32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @foo_dispatch_0_matmul_DxDxD_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: index, %arg5: index, %arg6: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg5, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} | |
%5 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = stream.binding.subspan %arg6[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
%7 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} -> tensor<?x?xf32, #encoding2> | |
%8 = iree_tensor_ext.dispatch.tensor.load %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} -> tensor<?x?xf32, #encoding3> | |
%9 = tensor.empty(%0, %3) : tensor<?x?xf32> | |
%10 = tensor.empty(%0, %3) : tensor<?x?xf32, #encoding4> | |
%11 = linalg.fill ins(%cst : f32) outs(%10 : tensor<?x?xf32, #encoding4>) -> tensor<?x?xf32, #encoding4> | |
%12 = linalg.matmul ins(%7, %8 : tensor<?x?xf32, #encoding2>, tensor<?x?xf32, #encoding3>) outs(%11 : tensor<?x?xf32, #encoding4>) -> tensor<?x?xf32, #encoding4> | |
%13 = iree_encoding.unset_encoding %12 : tensor<?x?xf32, #encoding4> -> tensor<?x?xf32>{%0, %3} | |
%14 = linalg.generic {indexing_maps = [#map3, #map3], iterator_types = ["parallel", "parallel"]} ins(%13 : tensor<?x?xf32>) outs(%9 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%15 = arith.addf %in, %in : f32 | |
linalg.yield %15 : f32 | |
} -> tensor<?x?xf32> | |
iree_tensor_ext.dispatch.tensor.store %14, %6, offsets = [0, 0], sizes = [%0, %3], strides = [1, 1] : tensor<?x?xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
return | |
} | |
} | |
} | |
stream.executable private @_encoding_0 { | |
stream.executable.export public @_encoding_0_encode_DxDxf32_to_DxDxf32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @_encoding_0_encode_DxDxf32_to_DxDxf32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} | |
%5 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding>>{%2, %3} | |
%6 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} -> tensor<?x?xf32> | |
%7 = iree_encoding.set_encoding %6 : tensor<?x?xf32> -> tensor<?x?xf32, #encoding> | |
iree_tensor_ext.dispatch.tensor.store %7, %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : tensor<?x?xf32, #encoding> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding>>{%2, %3} | |
return | |
} | |
} | |
} | |
stream.executable private @_encoding_1 { | |
stream.executable.export public @_encoding_1_encode_DxDxf32_to_DxDxf32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @_encoding_1_encode_DxDxf32_to_DxDxf32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} | |
%5 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} -> tensor<?x?xf32> | |
%7 = iree_encoding.set_encoding %6 : tensor<?x?xf32> -> tensor<?x?xf32, #encoding1> | |
iree_tensor_ext.dispatch.tensor.store %7, %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : tensor<?x?xf32, #encoding1> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
return | |
} | |
} | |
} | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%c64 = arith.constant 64 : index | |
%c32 = arith.constant 32 : index | |
%c0 = arith.constant 0 : index | |
%c8 = arith.constant 8 : index | |
%c4 = arith.constant 4 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c4 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%5, %6]) type(%element_type_f32) encoding(%dense_row_major) | |
%7 = arith.muli %5, %c4 : index | |
%8 = arith.muli %7, %6 : index | |
%9 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%5, %6} in !stream.resource<external>{%8} | |
%10 = arith.ceildivsi %0, %c8 : index | |
%11 = arith.muli %10, %c32 : index | |
%12 = arith.muli %11, %1 : index | |
%13 = arith.ceildivsi %6, %c4 : index | |
%14 = arith.muli %13, %c4 : index | |
%15 = arith.muli %7, %14 : index | |
%16 = arith.muli %2, %6 : index | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%16} => !stream.timepoint | |
%17 = util.align %12, %c64 : index | |
%18 = util.align %15, %c64 : index | |
%19 = arith.addi %17, %18 : index | |
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%19} => !stream.timepoint | |
%20 = stream.timepoint.join max(%result_timepoint, %result_timepoint_1) => !stream.timepoint | |
%21 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%20) => with(%4 as %arg2: !stream.resource<external>{%3}, %9 as %arg3: !stream.resource<external>{%8}, %result as %arg4: !stream.resource<external>{%16}, %result_0 as %arg5: !stream.resource<transient>{%19}) { | |
stream.cmd.concurrent { | |
stream.cmd.dispatch @_encoding_0::@_encoding_0_encode_DxDxf32_to_DxDxf32[%0, %1, %0, %1](%0, %1, %0, %1 : index, index, index, index) { | |
ro %arg2[%c0 for %3] : !stream.resource<external>{%3}, | |
wo %arg5[%c0 for %12] : !stream.resource<transient>{%19} | |
} | |
stream.cmd.dispatch @_encoding_1::@_encoding_1_encode_DxDxf32_to_DxDxf32[%5, %6, %5, %6](%5, %6, %5, %6 : index, index, index, index) { | |
ro %arg3[%c0 for %8] : !stream.resource<external>{%8}, | |
wo %arg5[%17 for %15] : !stream.resource<transient>{%19} | |
} | |
} | |
stream.cmd.dispatch @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %5, %6](%0, %1, %5, %6 : index, index, index, index) { | |
ro %arg5[%c0 for %12] : !stream.resource<transient>{%19}, | |
ro %arg5[%17 for %15] : !stream.resource<transient>{%19}, | |
wo %arg4[%c0 for %16] : !stream.resource<external>{%16} | |
} | |
} => !stream.timepoint | |
%22 = stream.resource.dealloca on(#hal.device.affinity<@__device_0>) await(%21) => %result_0 : !stream.resource<transient>{%19} => !stream.timepoint | |
%23 = stream.timepoint.join max(%22, %21) => !stream.timepoint | |
%24 = stream.timepoint.await %23 => %result : !stream.resource<external>{%16} | |
%25 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %24 : tensor<?x?xf32>{%0, %6} in !stream.resource<external>{%16} -> !hal.buffer_view | |
util.return %25 : !hal.buffer_view | |
} | |
} | |
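// Note (illustrative, not compiler output): the resolved CPU layouts above are
// pack-style descriptors. #encoding tiles the LHS rows by 8 (innerDimsPos = [0, 1],
// innerTileSizes = [8, 1], identity outer permutation); #encoding1 tiles the RHS
// columns by 4 and swaps the outer dims (innerDimsPos = [1, 0], innerTileSizes =
// [4, 1], outerDimsPerm = [1, 0]). Assuming the usual tensor.pack reading of those
// fields, the packed shapes work out as in this Python sketch (names illustrative):
def ceildiv(a, b):
    return -(-a // b)

def packed_shape(shape, inner_dims_pos, inner_tile_sizes, outer_dims_perm):
    outer = list(shape)
    for dim, tile in zip(inner_dims_pos, inner_tile_sizes):
        outer[dim] = ceildiv(outer[dim], tile)      # pad and split each tiled dim
    outer = [outer[d] for d in outer_dims_perm]     # permute the outer dims
    return outer + list(inner_tile_sizes)           # append the inner tiles

# LHS MxK with #encoding  -> [ceildiv(M, 8), K, 8, 1]  (times 4 bytes = %12 above)
# RHS KxN with #encoding1 -> [ceildiv(N, 4), K, 4, 1]  (times 4 bytes = %15 above)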
// -----// IR Dump After VerifyLoweringToCmdPass (iree-stream-verify-lowering-to-cmd) //----- // | |
#encoding = #iree_encoding.layout<[#iree_cpu.cpu_encoding_layout<configuration = {encoding_info = {innerDimsPos = [0, 1], innerTileSizes = [8, 1], outerDimsPerm = [0, 1]}}>]> | |
#encoding1 = #iree_encoding.layout<[#iree_cpu.cpu_encoding_layout<configuration = {encoding_info = {innerDimsPos = [1, 0], innerTileSizes = [4, 1], outerDimsPerm = [1, 0]}}>]> | |
#executable_target_embedded_elf_x86_64 = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>, max_stack_allocation_size = 32768 : i64, native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#map3 = affine_map<(d0, d1) -> (d0, d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64]> : !hal.device | |
#encoding2 = #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding3 = #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding4 = #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
stream.executable private @foo_dispatch_0 { | |
stream.executable.export public @foo_dispatch_0_matmul_DxDxD_f32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @foo_dispatch_0_matmul_DxDxD_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: index, %arg5: index, %arg6: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg5, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} | |
%5 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = stream.binding.subspan %arg6[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
%7 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} -> tensor<?x?xf32, #encoding2> | |
%8 = iree_tensor_ext.dispatch.tensor.load %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} -> tensor<?x?xf32, #encoding3> | |
%9 = tensor.empty(%0, %3) : tensor<?x?xf32> | |
%10 = tensor.empty(%0, %3) : tensor<?x?xf32, #encoding4> | |
%11 = linalg.fill ins(%cst : f32) outs(%10 : tensor<?x?xf32, #encoding4>) -> tensor<?x?xf32, #encoding4> | |
%12 = linalg.matmul ins(%7, %8 : tensor<?x?xf32, #encoding2>, tensor<?x?xf32, #encoding3>) outs(%11 : tensor<?x?xf32, #encoding4>) -> tensor<?x?xf32, #encoding4> | |
%13 = iree_encoding.unset_encoding %12 : tensor<?x?xf32, #encoding4> -> tensor<?x?xf32>{%0, %3} | |
%14 = linalg.generic {indexing_maps = [#map3, #map3], iterator_types = ["parallel", "parallel"]} ins(%13 : tensor<?x?xf32>) outs(%9 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%15 = arith.addf %in, %in : f32 | |
linalg.yield %15 : f32 | |
} -> tensor<?x?xf32> | |
iree_tensor_ext.dispatch.tensor.store %14, %6, offsets = [0, 0], sizes = [%0, %3], strides = [1, 1] : tensor<?x?xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
return | |
} | |
} | |
} | |
stream.executable private @_encoding_0 { | |
stream.executable.export public @_encoding_0_encode_DxDxf32_to_DxDxf32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @_encoding_0_encode_DxDxf32_to_DxDxf32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} | |
%5 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding>>{%2, %3} | |
%6 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} -> tensor<?x?xf32> | |
%7 = iree_encoding.set_encoding %6 : tensor<?x?xf32> -> tensor<?x?xf32, #encoding> | |
iree_tensor_ext.dispatch.tensor.store %7, %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : tensor<?x?xf32, #encoding> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding>>{%2, %3} | |
return | |
} | |
} | |
} | |
stream.executable private @_encoding_1 { | |
stream.executable.export public @_encoding_1_encode_DxDxf32_to_DxDxf32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @_encoding_1_encode_DxDxf32_to_DxDxf32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} | |
%5 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} -> tensor<?x?xf32> | |
%7 = iree_encoding.set_encoding %6 : tensor<?x?xf32> -> tensor<?x?xf32, #encoding1> | |
iree_tensor_ext.dispatch.tensor.store %7, %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : tensor<?x?xf32, #encoding1> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
return | |
} | |
} | |
} | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%c64 = arith.constant 64 : index | |
%c32 = arith.constant 32 : index | |
%c0 = arith.constant 0 : index | |
%c8 = arith.constant 8 : index | |
%c4 = arith.constant 4 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c4 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%5, %6]) type(%element_type_f32) encoding(%dense_row_major) | |
%7 = arith.muli %5, %c4 : index | |
%8 = arith.muli %7, %6 : index | |
%9 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%5, %6} in !stream.resource<external>{%8} | |
%10 = arith.ceildivsi %0, %c8 : index | |
%11 = arith.muli %10, %c32 : index | |
%12 = arith.muli %11, %1 : index | |
%13 = arith.ceildivsi %6, %c4 : index | |
%14 = arith.muli %13, %c4 : index | |
%15 = arith.muli %7, %14 : index | |
%16 = arith.muli %2, %6 : index | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%16} => !stream.timepoint | |
%17 = util.align %12, %c64 : index | |
%18 = util.align %15, %c64 : index | |
%19 = arith.addi %17, %18 : index | |
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%19} => !stream.timepoint | |
%20 = stream.timepoint.join max(%result_timepoint, %result_timepoint_1) => !stream.timepoint | |
%21 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%20) => with(%4 as %arg2: !stream.resource<external>{%3}, %9 as %arg3: !stream.resource<external>{%8}, %result as %arg4: !stream.resource<external>{%16}, %result_0 as %arg5: !stream.resource<transient>{%19}) { | |
stream.cmd.concurrent { | |
stream.cmd.dispatch @_encoding_0::@_encoding_0_encode_DxDxf32_to_DxDxf32[%0, %1, %0, %1](%0, %1, %0, %1 : index, index, index, index) { | |
ro %arg2[%c0 for %3] : !stream.resource<external>{%3}, | |
wo %arg5[%c0 for %12] : !stream.resource<transient>{%19} | |
} | |
stream.cmd.dispatch @_encoding_1::@_encoding_1_encode_DxDxf32_to_DxDxf32[%5, %6, %5, %6](%5, %6, %5, %6 : index, index, index, index) { | |
ro %arg3[%c0 for %8] : !stream.resource<external>{%8}, | |
wo %arg5[%17 for %15] : !stream.resource<transient>{%19} | |
} | |
} | |
stream.cmd.dispatch @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %5, %6](%0, %1, %5, %6 : index, index, index, index) { | |
ro %arg5[%c0 for %12] : !stream.resource<transient>{%19}, | |
ro %arg5[%17 for %15] : !stream.resource<transient>{%19}, | |
wo %arg4[%c0 for %16] : !stream.resource<external>{%16} | |
} | |
} => !stream.timepoint | |
%22 = stream.resource.dealloca on(#hal.device.affinity<@__device_0>) await(%21) => %result_0 : !stream.resource<transient>{%19} => !stream.timepoint | |
%23 = stream.timepoint.join max(%22, %21) => !stream.timepoint | |
%24 = stream.timepoint.await %23 => %result : !stream.resource<external>{%16} | |
%25 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %24 : tensor<?x?xf32>{%0, %6} in !stream.resource<external>{%16} -> !hal.buffer_view | |
util.return %25 : !hal.buffer_view | |
} | |
} | |
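// Note (illustrative, not compiler output): inside stream.cmd.execute the two encode
// dispatches run under stream.cmd.concurrent into disjoint 64-byte-aligned slices of
// the transient slab, then foo_dispatch_0 reads both slices and writes the external
// result; the slab is deallocated once the execute timepoint resolves. Numerically the
// fused dispatch is a zero-initialized f32 matmul followed by an elementwise
// addf %in, %in, i.e. roughly this NumPy reference (a sketch, not the generated kernel):
import numpy as np

def foo_reference(a, b):
    acc = a.astype(np.float32) @ b.astype(np.float32)  # linalg.fill + linalg.matmul
    return acc + acc                                    # linalg.generic: addf %in, %in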
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%c64 = arith.constant 64 : index | |
%c32 = arith.constant 32 : index | |
%c0 = arith.constant 0 : index | |
%c8 = arith.constant 8 : index | |
%c4 = arith.constant 4 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c4 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%5, %6]) type(%element_type_f32) encoding(%dense_row_major) | |
%7 = arith.muli %5, %c4 : index | |
%8 = arith.muli %7, %6 : index | |
%9 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%5, %6} in !stream.resource<external>{%8} | |
%10 = arith.ceildivsi %0, %c8 : index | |
%11 = arith.muli %10, %c32 : index | |
%12 = arith.muli %11, %1 : index | |
%13 = arith.ceildivsi %6, %c4 : index | |
%14 = arith.muli %13, %c4 : index | |
%15 = arith.muli %7, %14 : index | |
%16 = arith.muli %2, %6 : index | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%16} => !stream.timepoint | |
%17 = util.align %12, %c64 : index | |
%18 = util.align %15, %c64 : index | |
%19 = arith.addi %17, %18 : index | |
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%19} => !stream.timepoint | |
%20 = stream.timepoint.join max(%result_timepoint, %result_timepoint_1) => !stream.timepoint | |
%21 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%20) => with(%4 as %arg2: !stream.resource<external>{%3}, %9 as %arg3: !stream.resource<external>{%8}, %result as %arg4: !stream.resource<external>{%16}, %result_0 as %arg5: !stream.resource<transient>{%19}) { | |
stream.cmd.concurrent { | |
stream.cmd.dispatch @_encoding_0::@_encoding_0_encode_DxDxf32_to_DxDxf32[%0, %1, %0, %1](%0, %1, %0, %1 : index, index, index, index) { | |
ro %arg2[%c0 for %3] : !stream.resource<external>{%3}, | |
wo %arg5[%c0 for %12] : !stream.resource<transient>{%19} | |
} | |
stream.cmd.dispatch @_encoding_1::@_encoding_1_encode_DxDxf32_to_DxDxf32[%5, %6, %5, %6](%5, %6, %5, %6 : index, index, index, index) { | |
ro %arg3[%c0 for %8] : !stream.resource<external>{%8}, | |
wo %arg5[%17 for %15] : !stream.resource<transient>{%19} | |
} | |
} | |
stream.cmd.dispatch @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %5, %6](%0, %1, %5, %6 : index, index, index, index) { | |
ro %arg5[%c0 for %12] : !stream.resource<transient>{%19}, | |
ro %arg5[%17 for %15] : !stream.resource<transient>{%19}, | |
wo %arg4[%c0 for %16] : !stream.resource<external>{%16} | |
} | |
} => !stream.timepoint | |
%22 = stream.resource.dealloca on(#hal.device.affinity<@__device_0>) await(%21) => %result_0 : !stream.resource<transient>{%19} => !stream.timepoint | |
%23 = stream.timepoint.join max(%22, %21) => !stream.timepoint | |
%24 = stream.timepoint.await %23 => %result : !stream.resource<external>{%16} | |
%25 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %24 : tensor<?x?xf32>{%0, %6} in !stream.resource<external>{%16} -> !hal.buffer_view | |
util.return %25 : !hal.buffer_view | |
} | |
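// Note: this Canonicalizer dump and the following function-level dumps (CSE,
// iree-util-optimize-int-arithmetic, iree-util-simplify-global-accesses,
// iree-util-apply-patterns) print only @foo and show it unchanged; the host code is
// already in its canonical form after lowering to stream commands.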
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%c64 = arith.constant 64 : index | |
%c32 = arith.constant 32 : index | |
%c0 = arith.constant 0 : index | |
%c8 = arith.constant 8 : index | |
%c4 = arith.constant 4 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c4 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%5, %6]) type(%element_type_f32) encoding(%dense_row_major) | |
%7 = arith.muli %5, %c4 : index | |
%8 = arith.muli %7, %6 : index | |
%9 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%5, %6} in !stream.resource<external>{%8} | |
%10 = arith.ceildivsi %0, %c8 : index | |
%11 = arith.muli %10, %c32 : index | |
%12 = arith.muli %11, %1 : index | |
%13 = arith.ceildivsi %6, %c4 : index | |
%14 = arith.muli %13, %c4 : index | |
%15 = arith.muli %7, %14 : index | |
%16 = arith.muli %2, %6 : index | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%16} => !stream.timepoint | |
%17 = util.align %12, %c64 : index | |
%18 = util.align %15, %c64 : index | |
%19 = arith.addi %17, %18 : index | |
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%19} => !stream.timepoint | |
%20 = stream.timepoint.join max(%result_timepoint, %result_timepoint_1) => !stream.timepoint | |
%21 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%20) => with(%4 as %arg2: !stream.resource<external>{%3}, %9 as %arg3: !stream.resource<external>{%8}, %result as %arg4: !stream.resource<external>{%16}, %result_0 as %arg5: !stream.resource<transient>{%19}) { | |
stream.cmd.concurrent { | |
stream.cmd.dispatch @_encoding_0::@_encoding_0_encode_DxDxf32_to_DxDxf32[%0, %1, %0, %1](%0, %1, %0, %1 : index, index, index, index) { | |
ro %arg2[%c0 for %3] : !stream.resource<external>{%3}, | |
wo %arg5[%c0 for %12] : !stream.resource<transient>{%19} | |
} | |
stream.cmd.dispatch @_encoding_1::@_encoding_1_encode_DxDxf32_to_DxDxf32[%5, %6, %5, %6](%5, %6, %5, %6 : index, index, index, index) { | |
ro %arg3[%c0 for %8] : !stream.resource<external>{%8}, | |
wo %arg5[%17 for %15] : !stream.resource<transient>{%19} | |
} | |
} | |
stream.cmd.dispatch @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %5, %6](%0, %1, %5, %6 : index, index, index, index) { | |
ro %arg5[%c0 for %12] : !stream.resource<transient>{%19}, | |
ro %arg5[%17 for %15] : !stream.resource<transient>{%19}, | |
wo %arg4[%c0 for %16] : !stream.resource<external>{%16} | |
} | |
} => !stream.timepoint | |
%22 = stream.resource.dealloca on(#hal.device.affinity<@__device_0>) await(%21) => %result_0 : !stream.resource<transient>{%19} => !stream.timepoint | |
%23 = stream.timepoint.join max(%22, %21) => !stream.timepoint | |
%24 = stream.timepoint.await %23 => %result : !stream.resource<external>{%16} | |
%25 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %24 : tensor<?x?xf32>{%0, %6} in !stream.resource<external>{%16} -> !hal.buffer_view | |
util.return %25 : !hal.buffer_view | |
} | |
// -----// IR Dump After OptimizeIntArithmeticPass (iree-util-optimize-int-arithmetic) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%c64 = arith.constant 64 : index | |
%c32 = arith.constant 32 : index | |
%c0 = arith.constant 0 : index | |
%c8 = arith.constant 8 : index | |
%c4 = arith.constant 4 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c4 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%5, %6]) type(%element_type_f32) encoding(%dense_row_major) | |
%7 = arith.muli %5, %c4 : index | |
%8 = arith.muli %7, %6 : index | |
%9 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%5, %6} in !stream.resource<external>{%8} | |
%10 = arith.ceildivsi %0, %c8 : index | |
%11 = arith.muli %10, %c32 : index | |
%12 = arith.muli %11, %1 : index | |
%13 = arith.ceildivsi %6, %c4 : index | |
%14 = arith.muli %13, %c4 : index | |
%15 = arith.muli %7, %14 : index | |
%16 = arith.muli %2, %6 : index | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%16} => !stream.timepoint | |
%17 = util.align %12, %c64 : index | |
%18 = util.align %15, %c64 : index | |
%19 = arith.addi %17, %18 : index | |
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%19} => !stream.timepoint | |
%20 = stream.timepoint.join max(%result_timepoint, %result_timepoint_1) => !stream.timepoint | |
%21 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%20) => with(%4 as %arg2: !stream.resource<external>{%3}, %9 as %arg3: !stream.resource<external>{%8}, %result as %arg4: !stream.resource<external>{%16}, %result_0 as %arg5: !stream.resource<transient>{%19}) { | |
stream.cmd.concurrent { | |
stream.cmd.dispatch @_encoding_0::@_encoding_0_encode_DxDxf32_to_DxDxf32[%0, %1, %0, %1](%0, %1, %0, %1 : index, index, index, index) { | |
ro %arg2[%c0 for %3] : !stream.resource<external>{%3}, | |
wo %arg5[%c0 for %12] : !stream.resource<transient>{%19} | |
} | |
stream.cmd.dispatch @_encoding_1::@_encoding_1_encode_DxDxf32_to_DxDxf32[%5, %6, %5, %6](%5, %6, %5, %6 : index, index, index, index) { | |
ro %arg3[%c0 for %8] : !stream.resource<external>{%8}, | |
wo %arg5[%17 for %15] : !stream.resource<transient>{%19} | |
} | |
} | |
stream.cmd.dispatch @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %5, %6](%0, %1, %5, %6 : index, index, index, index) { | |
ro %arg5[%c0 for %12] : !stream.resource<transient>{%19}, | |
ro %arg5[%17 for %15] : !stream.resource<transient>{%19}, | |
wo %arg4[%c0 for %16] : !stream.resource<external>{%16} | |
} | |
} => !stream.timepoint | |
%22 = stream.resource.dealloca on(#hal.device.affinity<@__device_0>) await(%21) => %result_0 : !stream.resource<transient>{%19} => !stream.timepoint | |
%23 = stream.timepoint.join max(%22, %21) => !stream.timepoint | |
%24 = stream.timepoint.await %23 => %result : !stream.resource<external>{%16} | |
%25 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %24 : tensor<?x?xf32>{%0, %6} in !stream.resource<external>{%16} -> !hal.buffer_view | |
util.return %25 : !hal.buffer_view | |
} | |
// -----// IR Dump After SimplifyGlobalAccessesPass (iree-util-simplify-global-accesses) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%c64 = arith.constant 64 : index | |
%c32 = arith.constant 32 : index | |
%c0 = arith.constant 0 : index | |
%c8 = arith.constant 8 : index | |
%c4 = arith.constant 4 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c4 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%5, %6]) type(%element_type_f32) encoding(%dense_row_major) | |
%7 = arith.muli %5, %c4 : index | |
%8 = arith.muli %7, %6 : index | |
%9 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%5, %6} in !stream.resource<external>{%8} | |
%10 = arith.ceildivsi %0, %c8 : index | |
%11 = arith.muli %10, %c32 : index | |
%12 = arith.muli %11, %1 : index | |
%13 = arith.ceildivsi %6, %c4 : index | |
%14 = arith.muli %13, %c4 : index | |
%15 = arith.muli %7, %14 : index | |
%16 = arith.muli %2, %6 : index | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%16} => !stream.timepoint | |
%17 = util.align %12, %c64 : index | |
%18 = util.align %15, %c64 : index | |
%19 = arith.addi %17, %18 : index | |
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%19} => !stream.timepoint | |
%20 = stream.timepoint.join max(%result_timepoint, %result_timepoint_1) => !stream.timepoint | |
%21 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%20) => with(%4 as %arg2: !stream.resource<external>{%3}, %9 as %arg3: !stream.resource<external>{%8}, %result as %arg4: !stream.resource<external>{%16}, %result_0 as %arg5: !stream.resource<transient>{%19}) { | |
stream.cmd.concurrent { | |
stream.cmd.dispatch @_encoding_0::@_encoding_0_encode_DxDxf32_to_DxDxf32[%0, %1, %0, %1](%0, %1, %0, %1 : index, index, index, index) { | |
ro %arg2[%c0 for %3] : !stream.resource<external>{%3}, | |
wo %arg5[%c0 for %12] : !stream.resource<transient>{%19} | |
} | |
stream.cmd.dispatch @_encoding_1::@_encoding_1_encode_DxDxf32_to_DxDxf32[%5, %6, %5, %6](%5, %6, %5, %6 : index, index, index, index) { | |
ro %arg3[%c0 for %8] : !stream.resource<external>{%8}, | |
wo %arg5[%17 for %15] : !stream.resource<transient>{%19} | |
} | |
} | |
stream.cmd.dispatch @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %5, %6](%0, %1, %5, %6 : index, index, index, index) { | |
ro %arg5[%c0 for %12] : !stream.resource<transient>{%19}, | |
ro %arg5[%17 for %15] : !stream.resource<transient>{%19}, | |
wo %arg4[%c0 for %16] : !stream.resource<external>{%16} | |
} | |
} => !stream.timepoint | |
%22 = stream.resource.dealloca on(#hal.device.affinity<@__device_0>) await(%21) => %result_0 : !stream.resource<transient>{%19} => !stream.timepoint | |
%23 = stream.timepoint.join max(%22, %21) => !stream.timepoint | |
%24 = stream.timepoint.await %23 => %result : !stream.resource<external>{%16} | |
%25 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %24 : tensor<?x?xf32>{%0, %6} in !stream.resource<external>{%16} -> !hal.buffer_view | |
util.return %25 : !hal.buffer_view | |
} | |
// -----// IR Dump After ApplyPatternsPass (iree-util-apply-patterns) //----- // | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%c64 = arith.constant 64 : index | |
%c32 = arith.constant 32 : index | |
%c0 = arith.constant 0 : index | |
%c8 = arith.constant 8 : index | |
%c4 = arith.constant 4 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c4 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%5, %6]) type(%element_type_f32) encoding(%dense_row_major) | |
%7 = arith.muli %5, %c4 : index | |
%8 = arith.muli %7, %6 : index | |
%9 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%5, %6} in !stream.resource<external>{%8} | |
%10 = arith.ceildivsi %0, %c8 : index | |
%11 = arith.muli %10, %c32 : index | |
%12 = arith.muli %11, %1 : index | |
%13 = arith.ceildivsi %6, %c4 : index | |
%14 = arith.muli %13, %c4 : index | |
%15 = arith.muli %7, %14 : index | |
%16 = arith.muli %2, %6 : index | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%16} => !stream.timepoint | |
%17 = util.align %12, %c64 : index | |
%18 = util.align %15, %c64 : index | |
%19 = arith.addi %17, %18 : index | |
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%19} => !stream.timepoint | |
%20 = stream.timepoint.join max(%result_timepoint, %result_timepoint_1) => !stream.timepoint | |
%21 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%20) => with(%4 as %arg2: !stream.resource<external>{%3}, %9 as %arg3: !stream.resource<external>{%8}, %result as %arg4: !stream.resource<external>{%16}, %result_0 as %arg5: !stream.resource<transient>{%19}) { | |
stream.cmd.concurrent { | |
stream.cmd.dispatch @_encoding_0::@_encoding_0_encode_DxDxf32_to_DxDxf32[%0, %1, %0, %1](%0, %1, %0, %1 : index, index, index, index) { | |
ro %arg2[%c0 for %3] : !stream.resource<external>{%3}, | |
wo %arg5[%c0 for %12] : !stream.resource<transient>{%19} | |
} | |
stream.cmd.dispatch @_encoding_1::@_encoding_1_encode_DxDxf32_to_DxDxf32[%5, %6, %5, %6](%5, %6, %5, %6 : index, index, index, index) { | |
ro %arg3[%c0 for %8] : !stream.resource<external>{%8}, | |
wo %arg5[%17 for %15] : !stream.resource<transient>{%19} | |
} | |
} | |
stream.cmd.dispatch @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %5, %6](%0, %1, %5, %6 : index, index, index, index) { | |
ro %arg5[%c0 for %12] : !stream.resource<transient>{%19}, | |
ro %arg5[%17 for %15] : !stream.resource<transient>{%19}, | |
wo %arg4[%c0 for %16] : !stream.resource<external>{%16} | |
} | |
} => !stream.timepoint | |
%22 = stream.resource.dealloca on(#hal.device.affinity<@__device_0>) await(%21) => %result_0 : !stream.resource<transient>{%19} => !stream.timepoint | |
%23 = stream.timepoint.join max(%22, %21) => !stream.timepoint | |
%24 = stream.timepoint.await %23 => %result : !stream.resource<external>{%16} | |
%25 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %24 : tensor<?x?xf32>{%0, %6} in !stream.resource<external>{%16} -> !hal.buffer_view | |
util.return %25 : !hal.buffer_view | |
} | |
// -----// IR Dump After FoldGlobalsPass (iree-util-fold-globals) //----- // | |
#encoding = #iree_encoding.layout<[#iree_cpu.cpu_encoding_layout<configuration = {encoding_info = {innerDimsPos = [0, 1], innerTileSizes = [8, 1], outerDimsPerm = [0, 1]}}>]> | |
#encoding1 = #iree_encoding.layout<[#iree_cpu.cpu_encoding_layout<configuration = {encoding_info = {innerDimsPos = [1, 0], innerTileSizes = [4, 1], outerDimsPerm = [1, 0]}}>]> | |
#executable_target_embedded_elf_x86_64 = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>, max_stack_allocation_size = 32768 : i64, native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#map3 = affine_map<(d0, d1) -> (d0, d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64]> : !hal.device | |
#encoding2 = #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding3 = #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
#encoding4 = #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]> | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
stream.executable private @foo_dispatch_0 { | |
stream.executable.export public @foo_dispatch_0_matmul_DxDxD_f32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @foo_dispatch_0_matmul_DxDxD_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: index, %arg5: index, %arg6: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg5, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} | |
%5 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = stream.binding.subspan %arg6[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
%7 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} -> tensor<?x?xf32, #encoding2> | |
%8 = iree_tensor_ext.dispatch.tensor.load %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} -> tensor<?x?xf32, #encoding3> | |
%9 = tensor.empty(%0, %3) : tensor<?x?xf32> | |
%10 = tensor.empty(%0, %3) : tensor<?x?xf32, #encoding4> | |
%11 = linalg.fill ins(%cst : f32) outs(%10 : tensor<?x?xf32, #encoding4>) -> tensor<?x?xf32, #encoding4> | |
%12 = linalg.matmul ins(%7, %8 : tensor<?x?xf32, #encoding2>, tensor<?x?xf32, #encoding3>) outs(%11 : tensor<?x?xf32, #encoding4>) -> tensor<?x?xf32, #encoding4> | |
%13 = iree_encoding.unset_encoding %12 : tensor<?x?xf32, #encoding4> -> tensor<?x?xf32>{%0, %3} | |
%14 = linalg.generic {indexing_maps = [#map3, #map3], iterator_types = ["parallel", "parallel"]} ins(%13 : tensor<?x?xf32>) outs(%9 : tensor<?x?xf32>) { | |
^bb0(%in: f32, %out: f32): | |
%15 = arith.addf %in, %in : f32 | |
linalg.yield %15 : f32 | |
} -> tensor<?x?xf32> | |
iree_tensor_ext.dispatch.tensor.store %14, %6, offsets = [0, 0], sizes = [%0, %3], strides = [1, 1] : tensor<?x?xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3} | |
return | |
} | |
} | |
} | |
stream.executable private @_encoding_0 { | |
stream.executable.export public @_encoding_0_encode_DxDxf32_to_DxDxf32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @_encoding_0_encode_DxDxf32_to_DxDxf32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} | |
%5 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding>>{%2, %3} | |
%6 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} -> tensor<?x?xf32> | |
%7 = iree_encoding.set_encoding %6 : tensor<?x?xf32> -> tensor<?x?xf32, #encoding> | |
iree_tensor_ext.dispatch.tensor.store %7, %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : tensor<?x?xf32, #encoding> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding>>{%2, %3} | |
return | |
} | |
} | |
} | |
stream.executable private @_encoding_1 { | |
stream.executable.export public @_encoding_1_encode_DxDxf32_to_DxDxf32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @_encoding_1_encode_DxDxf32_to_DxDxf32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = iree_tensor_ext.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 3 : index | |
%4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} | |
%5 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
%6 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} -> tensor<?x?xf32> | |
%7 = iree_encoding.set_encoding %6 : tensor<?x?xf32> -> tensor<?x?xf32, #encoding1> | |
iree_tensor_ext.dispatch.tensor.store %7, %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : tensor<?x?xf32, #encoding1> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding1>>{%2, %3} | |
return | |
} | |
} | |
} | |
util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} { | |
%c64 = arith.constant 64 : index | |
%c32 = arith.constant 32 : index | |
%c0 = arith.constant 0 : index | |
%c8 = arith.constant 8 : index | |
%c4 = arith.constant 4 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c4 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index | |
%6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%5, %6]) type(%element_type_f32) encoding(%dense_row_major) | |
%7 = arith.muli %5, %c4 : index | |
%8 = arith.muli %7, %6 : index | |
%9 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%5, %6} in !stream.resource<external>{%8} | |
%10 = arith.ceildivsi %0, %c8 : index | |
%11 = arith.muli %10, %c32 : index | |
%12 = arith.muli %11, %1 : index | |
%13 = arith.ceildivsi %6, %c4 : index | |
%14 = arith.muli %13, %c4 : index | |
%15 = arith.muli %7, %14 : index | |
%16 = arith.muli %2, %6 : index | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%16} => !stream.timepoint | |
%17 = util.align %12, %c64 : index | |
%18 = util.align %15, %c64 : index | |
%19 = arith.addi %17, %18 : index | |
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%19} => !stream.timepoint | |
%20 = stream.timepoint.join max(%result_timepoint, %result_timepoint_1) => !stream.timepoint | |
%21 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%20) => with(%4 as %arg2: !stream.resource<external>{%3}, %9 as %arg3: !stream.resource<external>{%8}, %result as %arg4: !stream.resource<external>{%16}, %result_0 as %arg5: !stream.resource<transient>{%19}) { | |
stream.cmd.concurrent { | |
stream.cmd.dispatch @_encoding_0::@_encoding_0_encode_DxDxf32_to_DxDxf32[%0, %1, %0, %1](%0, %1, %0, %1 : index, index, index, index) { | |
ro %arg2[%c0 for %3] : !stream.resource<external>{%3}, | |
wo %arg5[%c0 for %12] : !stream.resource<transient>{%19} | |
} | |
stream.cmd.dispatch @_encoding_1::@_encoding_1_encode_DxDxf32_to_DxDxf32[%5, %6, %5, %6](%5, %6, %5, %6 : index, index, index, index) { | |
ro %arg3[%c0 for %8] : !stream.resource<external>{%8}, | |
wo %arg5[%17 for %15] : !stream.resource<transient>{%19} | |
} | |
} | |
stream.cmd.dispatch @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %5, %6](%0, %1, %5, %6 : index, index, index, index) { | |
ro %arg5[%c0 for %12] : !stream.resource<transient>{%19}, | |
ro %arg5[%17 for %15] : !stream.resource<transient>{%19}, | |
wo %arg4[%c0 for %16] : !stream.resource<external>{%16} | |
} | |
} => !stream.timepoint | |
%22 = stream.resource.dealloca on(#hal.device.affinity<@__device_0>) await(%21) => %result_0 : !stream.resource<transient>{%19} => !stream.timepoint | |
%23 = stream.timepoint.join max(%22, %21) => !stream.timepoint | |
%24 = stream.timepoint.await %23 => %result : !stream.resource<external>{%16} | |
%25 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %24 : tensor<?x?xf32>{%0, %6} in !stream.resource<external>{%16} -> !hal.buffer_view | |
util.return %25 : !hal.buffer_view | |
} | |
} | |
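// Note: FoldGlobalsPass (above) and FuseGlobalsPass (below) dump the whole module
// again. @__device_0 is the only global (the local llvm-cpu target with the
// #iree_cpu.cpu_encoding_layout resolver), so there is effectively nothing to fold or
// fuse, and the three executables plus @foo pass through unchanged.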
// -----// IR Dump After FuseGlobalsPass (iree-util-fuse-globals) //----- //
#encoding = #iree_encoding.layout<[#iree_cpu.cpu_encoding_layout<configuration = {encoding_info = {innerDimsPos = [0, 1], innerTileSizes = [8, 1], outerDimsPerm = [0, 1]}}>]>
#encoding1 = #iree_encoding.layout<[#iree_cpu.cpu_encoding_layout<configuration = {encoding_info = {innerDimsPos = [1, 0], innerTileSizes = [4, 1], outerDimsPerm = [1, 0]}}>]>
#executable_target_embedded_elf_x86_64 = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", iree.encoding.resolver = #iree_cpu.cpu_encoding_layout<>, max_stack_allocation_size = 32768 : i64, native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d0, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
#map3 = affine_map<(d0, d1) -> (d0, d1)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64]> : !hal.device
#encoding2 = #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]>
#encoding3 = #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]>
#encoding4 = #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]>
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_local
  stream.executable private @foo_dispatch_0 {
    stream.executable.export public @foo_dispatch_0_matmul_DxDxD_f32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) {
      %x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3
      stream.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @foo_dispatch_0_matmul_DxDxD_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: index, %arg5: index, %arg6: !stream.binding) {
        %c0 = arith.constant 0 : index
        %cst = arith.constant 0.000000e+00 : f32
        %0 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 0 : index
        %1 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 1 : index
        %2 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 2 : index
        %3 = iree_tensor_ext.dispatch.workload.ordinal %arg5, 3 : index
        %4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1}
        %5 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3}
        %6 = stream.binding.subspan %arg6[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3}
        %7 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding>>{%0, %1} -> tensor<?x?xf32, #encoding2>
        %8 = iree_tensor_ext.dispatch.tensor.load %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding1>>{%2, %3} -> tensor<?x?xf32, #encoding3>
        %9 = tensor.empty(%0, %3) : tensor<?x?xf32>
        %10 = tensor.empty(%0, %3) : tensor<?x?xf32, #encoding4>
        %11 = linalg.fill ins(%cst : f32) outs(%10 : tensor<?x?xf32, #encoding4>) -> tensor<?x?xf32, #encoding4>
        %12 = linalg.matmul ins(%7, %8 : tensor<?x?xf32, #encoding2>, tensor<?x?xf32, #encoding3>) outs(%11 : tensor<?x?xf32, #encoding4>) -> tensor<?x?xf32, #encoding4>
        %13 = iree_encoding.unset_encoding %12 : tensor<?x?xf32, #encoding4> -> tensor<?x?xf32>{%0, %3}
        %14 = linalg.generic {indexing_maps = [#map3, #map3], iterator_types = ["parallel", "parallel"]} ins(%13 : tensor<?x?xf32>) outs(%9 : tensor<?x?xf32>) {
        ^bb0(%in: f32, %out: f32):
          %15 = arith.addf %in, %in : f32
          linalg.yield %15 : f32
        } -> tensor<?x?xf32>
        iree_tensor_ext.dispatch.tensor.store %14, %6, offsets = [0, 0], sizes = [%0, %3], strides = [1, 1] : tensor<?x?xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32>>{%0, %3}
        return
      }
    }
  }
  stream.executable private @_encoding_0 {
    stream.executable.export public @_encoding_0_encode_DxDxf32_to_DxDxf32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) {
      %x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3
      stream.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @_encoding_0_encode_DxDxf32_to_DxDxf32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: !stream.binding) {
        %c0 = arith.constant 0 : index
        %0 = iree_tensor_ext.dispatch.workload.ordinal %arg1, 0 : index
        %1 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 1 : index
        %2 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 2 : index
        %3 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 3 : index
        %4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1}
        %5 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding>>{%2, %3}
        %6 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} -> tensor<?x?xf32>
        %7 = iree_encoding.set_encoding %6 : tensor<?x?xf32> -> tensor<?x?xf32, #encoding>
        iree_tensor_ext.dispatch.tensor.store %7, %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : tensor<?x?xf32, #encoding> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding>>{%2, %3}
        return
      }
    }
  }
  stream.executable private @_encoding_1 {
    stream.executable.export public @_encoding_1_encode_DxDxf32_to_DxDxf32 workgroups(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> (index, index, index) {
      %x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2, %arg3
      stream.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @_encoding_1_encode_DxDxf32_to_DxDxf32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: !stream.binding) {
        %c0 = arith.constant 0 : index
        %0 = iree_tensor_ext.dispatch.workload.ordinal %arg1, 0 : index
        %1 = iree_tensor_ext.dispatch.workload.ordinal %arg2, 1 : index
        %2 = iree_tensor_ext.dispatch.workload.ordinal %arg3, 2 : index
        %3 = iree_tensor_ext.dispatch.workload.ordinal %arg4, 3 : index
        %4 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1}
        %5 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding1>>{%2, %3}
        %6 = iree_tensor_ext.dispatch.tensor.load %4, offsets = [0, 0], sizes = [%0, %1], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%0, %1} -> tensor<?x?xf32>
        %7 = iree_encoding.set_encoding %6 : tensor<?x?xf32> -> tensor<?x?xf32, #encoding1>
        iree_tensor_ext.dispatch.tensor.store %7, %5, offsets = [0, 0], sizes = [%2, %3], strides = [1, 1] : tensor<?x?xf32, #encoding1> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<?x?xf32, #encoding1>>{%2, %3}
        return
      }
    }
  }
  util.func public @foo(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @foo(%input0: tensor<?x?xf32>, %input1: tensor<?x?xf32>) -> (%output0: tensor<?x?xf32>)"}} {
    %c64 = arith.constant 64 : index
    %c32 = arith.constant 32 : index
    %c0 = arith.constant 0 : index
    %c8 = arith.constant 8 : index
    %c4 = arith.constant 4 : index
    %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
    %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
    %element_type_f32 = hal.element_type<f32> : i32
    %dense_row_major = hal.encoding_type<dense_row_major> : i32
    hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1]) type(%element_type_f32) encoding(%dense_row_major)
    %2 = arith.muli %0, %c4 : index
    %3 = arith.muli %2, %1 : index
    %4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?xf32>{%0, %1} in !stream.resource<external>{%3}
    %5 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
    %6 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[1] : index
    hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%5, %6]) type(%element_type_f32) encoding(%dense_row_major)
    %7 = arith.muli %5, %c4 : index
    %8 = arith.muli %7, %6 : index
    %9 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?x?xf32>{%5, %6} in !stream.resource<external>{%8}
    %10 = arith.ceildivsi %0, %c8 : index
    %11 = arith.muli %10, %c32 : index
    %12 = arith.muli %11, %1 : index
    %13 = arith.ceildivsi %6, %c4 : index
    %14 = arith.muli %13, %c4 : index
    %15 = arith.muli %7, %14 : index
    %16 = arith.muli %2, %6 : index
    %result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%16} => !stream.timepoint
    %17 = util.align %12, %c64 : index
    %18 = util.align %15, %c64 : index
    %19 = arith.addi %17, %18 : index
    %result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%19} => !stream.timepoint
    %20 = stream.timepoint.join max(%result_timepoint, %result_timepoint_1) => !stream.timepoint
    %21 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%20) => with(%4 as %arg2: !stream.resource<external>{%3}, %9 as %arg3: !stream.resource<external>{%8}, %result as %arg4: !stream.resource<external>{%16}, %result_0 as %arg5: !stream.resource<transient>{%19}) {
      stream.cmd.concurrent {
        stream.cmd.dispatch @_encoding_0::@_encoding_0_encode_DxDxf32_to_DxDxf32[%0, %1, %0, %1](%0, %1, %0, %1 : index, index, index, index) {
          ro %arg2[%c0 for %3] : !stream.resource<external>{%3},
          wo %arg5[%c0 for %12] : !stream.resource<transient>{%19}
        }
        stream.cmd.dispatch @_encoding_1::@_encoding_1_encode_DxDxf32_to_DxDxf32[%5, %6, %5, %6](%5, %6, %5, %6 : index, index, index, index) {
          ro %arg3[%c0 for %8] : !stream.resource<external>{%8},
          wo %arg5[%17 for %15] : !stream.resource<transient>{%19}
        }
      }
      stream.cmd.dispatch @foo_dispatch_0::@foo_dispatch_0_matmul_DxDxD_f32[%0, %1, %5, %6](%0, %1, %5, %6 : index, index, index, index) {
        ro %arg5[%c0 for %12] : !stream.resource<transient>{%19},
        ro %arg5[%17 for %15] : !stream.resource<transient>{%19},
        wo %arg4[%c0 for %16] : !stream.resource<external>{%16}
      }
    } => !stream.timepoint
    %22 = stream.resource.dealloca on(#hal.device.affinity<@__device_0>) await(%21) => %result_0 : !stream.resource<transient>{%19} => !stream.timepoint
    %23 = stream.timepoint.join max(%22, %21) => !stream.timepoint
    %24 = stream.timepoint.await %23 => %result : !stream.resource<external>{%16}
    %25 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %24 : tensor<?x?xf32>{%0, %6} in !stream.resource<external>{%16} -> !hal.buffer_view
    util.return %25 : !hal.buffer_view
  }
}
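
The #encoding / #encoding1 layouts at the top of this dump record the CPU encoding resolver's choice of data tiling: the LHS is packed into 8x1 inner tiles along its rows (innerDimsPos = [0, 1], innerTileSizes = [8, 1]), while the RHS is packed into 4x1 tiles along its columns with the outer dimensions transposed (innerDimsPos = [1, 0], innerTileSizes = [4, 1], outerDimsPerm = [1, 0]). As a rough illustration only, assuming linalg.pack-style semantics and made-up sizes M, K, N (the dump itself is fully dynamic), the packed shape implied by such an encoding_info can be computed like this:

def ceildiv(a, b):
    return -(-a // b)

def packed_shape(shape, inner_dims_pos, inner_tile_sizes, outer_dims_perm):
    # Tile each dim listed in inner_dims_pos, permute the outer dims,
    # then append the inner tile dims (linalg.pack-style semantics).
    tiles = dict(zip(inner_dims_pos, inner_tile_sizes))
    outer = [ceildiv(shape[d], tiles.get(d, 1)) for d in range(len(shape))]
    outer = [outer[d] for d in outer_dims_perm]
    return outer + list(inner_tile_sizes)

M, K, N = 100, 64, 50  # illustrative sizes

# #encoding  (LHS, MxK): innerDimsPos = [0, 1], innerTileSizes = [8, 1], outerDimsPerm = [0, 1]
print(packed_shape([M, K], [0, 1], [8, 1], [0, 1]))  # [13, 64, 8, 1]

# #encoding1 (RHS, KxN): innerDimsPos = [1, 0], innerTileSizes = [4, 1], outerDimsPerm = [1, 0]
print(packed_shape([K, N], [1, 0], [4, 1], [1, 0]))  # [13, 64, 4, 1]

The element counts of these packed shapes (13*64*8 and 13*64*4 for the example sizes) match the %12 and %15 byte sizes above divided by 4, which is how the host code sizes the transient pool without ever materializing the packed tensors on the host.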