(This file has been truncated.)
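The dumps below trace a broadcast matmul (a dynamic tensor<?x?x3200xf32> against a transposed tensor<8640x3200xf16> weight) through IREE's compilation pipeline for the llvm-cpu backend. The "// -----// IR Dump After ... //----- //" banners are what MLIR's per-pass IR printing emits; as a hypothetical reconstruction (the gist does not record the actual command, and the file names here are placeholders), an invocation along these lines would produce a log in this format:

  iree-compile matmul_broad.mlir \
      --iree-hal-target-backends=llvm-cpu \
      --iree-llvmcpu-target-cpu=broadwell \
      --mlir-print-ir-after-all \
      -o matmul_broad.vmfb 2> ir_after_all.log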
// -----// IR Dump After AutoInputConversionPipeline (iree-auto-input-conversion) //----- //
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
module {
  util.func public @matmul_broad(%arg0: tensor<?x?x3200xf32>, %arg1: tensor<8640x3200xf16>) -> tensor<?x?x8640xf32> {
    %cst = arith.constant 0.000000e+00 : f32
    %c0 = arith.constant 0 : index
    %c1 = arith.constant 1 : index
    %dim = tensor.dim %arg0, %c0 : tensor<?x?x3200xf32>
    %dim_0 = tensor.dim %arg0, %c1 : tensor<?x?x3200xf32>
    %0 = tensor.empty(%dim) : tensor<?x8640x3200xf16>
    %1 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%arg1 : tensor<8640x3200xf16>) outs(%0 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %2 = tensor.empty(%dim, %dim_0) : tensor<?x?x8640xf32>
    %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    %4 = linalg.batch_matmul_transpose_b ins(%arg0, %1 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%3 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    util.return %4 : tensor<?x?x8640xf32>
  }
}
// -----// IR Dump After IREEImportPublic (iree-import-public) //----- //
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
module {
  util.func public @matmul_broad(%arg0: tensor<?x?x3200xf32>, %arg1: tensor<8640x3200xf16>) -> tensor<?x?x8640xf32> {
    %cst = arith.constant 0.000000e+00 : f32
    %c0 = arith.constant 0 : index
    %c1 = arith.constant 1 : index
    %dim = tensor.dim %arg0, %c0 : tensor<?x?x3200xf32>
    %dim_0 = tensor.dim %arg0, %c1 : tensor<?x?x3200xf32>
    %0 = tensor.empty(%dim) : tensor<?x8640x3200xf16>
    %1 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%arg1 : tensor<8640x3200xf16>) outs(%0 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %2 = tensor.empty(%dim, %dim_0) : tensor<?x?x8640xf32>
    %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    %4 = linalg.batch_matmul_transpose_b ins(%arg0, %1 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%3 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    util.return %4 : tensor<?x?x8640xf32>
  }
}
// -----// IR Dump After ImportMLProgram (iree-import-ml-program) //----- //
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
module {
  util.func public @matmul_broad(%arg0: tensor<?x?x3200xf32>, %arg1: tensor<8640x3200xf16>) -> tensor<?x?x8640xf32> {
    %cst = arith.constant 0.000000e+00 : f32
    %c0 = arith.constant 0 : index
    %c1 = arith.constant 1 : index
    %dim = tensor.dim %arg0, %c0 : tensor<?x?x3200xf32>
    %dim_0 = tensor.dim %arg0, %c1 : tensor<?x?x3200xf32>
    %0 = tensor.empty(%dim) : tensor<?x8640x3200xf16>
    %1 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%arg1 : tensor<8640x3200xf16>) outs(%0 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %2 = tensor.empty(%dim, %dim_0) : tensor<?x?x8640xf32>
    %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    %4 = linalg.batch_matmul_transpose_b ins(%arg0, %1 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%3 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    util.return %4 : tensor<?x?x8640xf32>
  }
}
// -----// IR Dump After SanitizeModuleNames (iree-sanitize-module-names) //----- //
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
module {
  util.func public @matmul_broad(%arg0: tensor<?x?x3200xf32>, %arg1: tensor<8640x3200xf16>) -> tensor<?x?x8640xf32> {
    %cst = arith.constant 0.000000e+00 : f32
    %c0 = arith.constant 0 : index
    %c1 = arith.constant 1 : index
    %dim = tensor.dim %arg0, %c0 : tensor<?x?x3200xf32>
    %dim_0 = tensor.dim %arg0, %c1 : tensor<?x?x3200xf32>
    %0 = tensor.empty(%dim) : tensor<?x8640x3200xf16>
    %1 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%arg1 : tensor<8640x3200xf16>) outs(%0 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %2 = tensor.empty(%dim, %dim_0) : tensor<?x?x8640xf32>
    %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    %4 = linalg.batch_matmul_transpose_b ins(%arg0, %1 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%3 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    util.return %4 : tensor<?x?x8640xf32>
  }
}
// -----// IR Dump After ConvertMeshToFlowPass (iree-convert-mesh-to-flow) //----- //
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
module {
  util.func public @matmul_broad(%arg0: tensor<?x?x3200xf32>, %arg1: tensor<8640x3200xf16>) -> tensor<?x?x8640xf32> {
    %cst = arith.constant 0.000000e+00 : f32
    %c0 = arith.constant 0 : index
    %c1 = arith.constant 1 : index
    %dim = tensor.dim %arg0, %c0 : tensor<?x?x3200xf32>
    %dim_0 = tensor.dim %arg0, %c1 : tensor<?x?x3200xf32>
    %0 = tensor.empty(%dim) : tensor<?x8640x3200xf16>
    %1 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%arg1 : tensor<8640x3200xf16>) outs(%0 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %2 = tensor.empty(%dim, %dim_0) : tensor<?x?x8640xf32>
    %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    %4 = linalg.batch_matmul_transpose_b ins(%arg0, %1 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%3 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    util.return %4 : tensor<?x?x8640xf32>
  }
}
// -----// IR Dump After DemoteF64ToF32 (iree-input-conversion-demote-f64-to-f32) //----- //
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
module {
  util.func public @matmul_broad(%arg0: tensor<?x?x3200xf32>, %arg1: tensor<8640x3200xf16>) -> tensor<?x?x8640xf32> {
    %cst = arith.constant 0.000000e+00 : f32
    %c0 = arith.constant 0 : index
    %c1 = arith.constant 1 : index
    %dim = tensor.dim %arg0, %c0 : tensor<?x?x3200xf32>
    %dim_0 = tensor.dim %arg0, %c1 : tensor<?x?x3200xf32>
    %0 = tensor.empty(%dim) : tensor<?x8640x3200xf16>
    %1 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%arg1 : tensor<8640x3200xf16>) outs(%0 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %2 = tensor.empty(%dim, %dim_0) : tensor<?x?x8640xf32>
    %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    %4 = linalg.batch_matmul_transpose_b ins(%arg0, %1 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%3 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    util.return %4 : tensor<?x?x8640xf32>
  }
}
// -----// IR Dump After mlir::iree_compiler::IREE::ABI::ConvertStreamableOpsPass (iree-abi-convert-streamable-ops) //----- //
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
module {
  util.func public @matmul_broad(%arg0: tensor<?x?x3200xf32>, %arg1: tensor<8640x3200xf16>) -> tensor<?x?x8640xf32> {
    %cst = arith.constant 0.000000e+00 : f32
    %c0 = arith.constant 0 : index
    %c1 = arith.constant 1 : index
    %dim = tensor.dim %arg0, %c0 : tensor<?x?x3200xf32>
    %dim_0 = tensor.dim %arg0, %c1 : tensor<?x?x3200xf32>
    %0 = tensor.empty(%dim) : tensor<?x8640x3200xf16>
    %1 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%arg1 : tensor<8640x3200xf16>) outs(%0 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %2 = tensor.empty(%dim, %dim_0) : tensor<?x?x8640xf32>
    %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    %4 = linalg.batch_matmul_transpose_b ins(%arg0, %1 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%3 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    util.return %4 : tensor<?x?x8640xf32>
  }
}
// -----// IR Dump After mlir::iree_compiler::IREE::ABI::WrapEntryPointsPass (iree-abi-wrap-entry-points) //----- //
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
module {
  util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
    %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
    %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
    %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
    %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
    %4 = util.call @_matmul_broad(%2, %3) : (tensor<?x?x3200xf32>, tensor<8640x3200xf16>) -> tensor<?x?x8640xf32>
    %c0 = arith.constant 0 : index
    %dim = tensor.dim %4, %c0 : tensor<?x?x8640xf32>
    %c1 = arith.constant 1 : index
    %dim_0 = tensor.dim %4, %c1 : tensor<?x?x8640xf32>
    %5 = hal.tensor.export %4 "output0" : tensor<?x?x8640xf32>{%dim, %dim_0} -> !hal.buffer_view
    util.return %5 : !hal.buffer_view
  }
  util.func private @_matmul_broad(%arg0: tensor<?x?x3200xf32>, %arg1: tensor<8640x3200xf16>) -> tensor<?x?x8640xf32> {
    %cst = arith.constant 0.000000e+00 : f32
    %c0 = arith.constant 0 : index
    %c1 = arith.constant 1 : index
    %dim = tensor.dim %arg0, %c0 : tensor<?x?x3200xf32>
    %dim_0 = tensor.dim %arg0, %c1 : tensor<?x?x3200xf32>
    %0 = tensor.empty(%dim) : tensor<?x8640x3200xf16>
    %1 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%arg1 : tensor<8640x3200xf16>) outs(%0 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %2 = tensor.empty(%dim, %dim_0) : tensor<?x?x8640xf32>
    %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    %4 = linalg.batch_matmul_transpose_b ins(%arg0, %1 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%3 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    util.return %4 : tensor<?x?x8640xf32>
  }
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func private @_matmul_broad(%arg0: tensor<?x?x3200xf32>, %arg1: tensor<8640x3200xf16>) -> tensor<?x?x8640xf32> {
  %cst = arith.constant 0.000000e+00 : f32
  %c0 = arith.constant 0 : index
  %c1 = arith.constant 1 : index
  %dim = tensor.dim %arg0, %c0 : tensor<?x?x3200xf32>
  %dim_0 = tensor.dim %arg0, %c1 : tensor<?x?x3200xf32>
  %0 = tensor.empty(%dim) : tensor<?x8640x3200xf16>
  %1 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%arg1 : tensor<8640x3200xf16>) outs(%0 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %2 = tensor.empty(%dim, %dim_0) : tensor<?x?x8640xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %4 = linalg.batch_matmul_transpose_b ins(%arg0, %1 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%3 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  util.return %4 : tensor<?x?x8640xf32>
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
  %c1 = arith.constant 1 : index
  %c0 = arith.constant 0 : index
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
  %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
  %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %4 = util.call @_matmul_broad(%2, %3) : (tensor<?x?x3200xf32>, tensor<8640x3200xf16>) -> tensor<?x?x8640xf32>
  %dim = tensor.dim %4, %c0 : tensor<?x?x8640xf32>
  %dim_0 = tensor.dim %4, %c1 : tensor<?x?x8640xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<?x?x8640xf32>{%dim, %dim_0} -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
  %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
  %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
  %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
  util.return %9 : !hal.buffer_view
}
// -----// IR Dump After Inliner (inline) //----- //
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
module {
  util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
    %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
    %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
    %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
    %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
    %5 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
    %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    %8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
    util.return %9 : !hal.buffer_view
  }
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
  %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
  %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
  %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
  util.return %9 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
  %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
  %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
  %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
  util.return %9 : !hal.buffer_view
}
// -----// IR Dump After SymbolDCE (symbol-dce) //----- //
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
module {
  util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
    %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
    %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
    %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
    %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
    %5 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
    %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    %8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
    util.return %9 : !hal.buffer_view
  }
}
// -----// IR Dump After AssignLegacyTargetDevicesPass (iree-hal-assign-legacy-target-devices) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "broadwell", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,-avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,+invpcid,+64bit,-xsavec,-avx10.1-512,-avx512vpopcntdq,+cmov,-avx512vp2intersect,-avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,+rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-vpclmulqdq,-avx512vl,-uintr,-cf,-clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,-gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,-avx512f,-amx-bf16,-avx512bf16,-avx512vnni,-push2pop2,+cx8,-avx512bw,+sse3,-pku,-nf,+fsgsbase,-clzero,-mwaitx,-lwp,+lzcnt,-sha,-movdir64b,-ppx,-wbnoinvd,-enqcmd,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,-avx512bitalg,-rdpru,-clwb,+mmx,+sse2,+rdseed,-avx512vbmi2,-prefetchi,-rdpid,-fma4,-avx512vbmi,-shstk,-vaes,-waitpkg,-sgx,+fxsr,-avx512dq,-sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 32 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {hal.device.targets = [#device_target_local]} {
  util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
    %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
    %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
    %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
    %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
    %5 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
    %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    %8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
    util.return %9 : !hal.buffer_view
  }
}
// -----// IR Dump After MaterializeTargetDevicesPass (iree-hal-materialize-target-devices) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "broadwell", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,-avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,+invpcid,+64bit,-xsavec,-avx10.1-512,-avx512vpopcntdq,+cmov,-avx512vp2intersect,-avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,+rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-vpclmulqdq,-avx512vl,-uintr,-cf,-clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,-gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,-avx512f,-amx-bf16,-avx512bf16,-avx512vnni,-push2pop2,+cx8,-avx512bw,+sse3,-pku,-nf,+fsgsbase,-clzero,-mwaitx,-lwp,+lzcnt,-sha,-movdir64b,-ppx,-wbnoinvd,-enqcmd,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,-avx512bitalg,-rdpru,-clwb,+mmx,+sse2,+rdseed,-avx512vbmi2,-prefetchi,-rdpid,-fma4,-avx512vbmi,-shstk,-vaes,-waitpkg,-sgx,+fxsr,-avx512dq,-sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 32 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_local
  util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
    %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
    %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
    %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
    %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
    %5 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
    %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    %8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
    util.return %9 : !hal.buffer_view
  }
}
// -----// IR Dump After ResolveDevicePromisesPass (iree-hal-resolve-device-promises) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "broadwell", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,-avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,+invpcid,+64bit,-xsavec,-avx10.1-512,-avx512vpopcntdq,+cmov,-avx512vp2intersect,-avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,+rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-vpclmulqdq,-avx512vl,-uintr,-cf,-clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,-gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,-avx512f,-amx-bf16,-avx512bf16,-avx512vnni,-push2pop2,+cx8,-avx512bw,+sse3,-pku,-nf,+fsgsbase,-clzero,-mwaitx,-lwp,+lzcnt,-sha,-movdir64b,-ppx,-wbnoinvd,-enqcmd,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,-avx512bitalg,-rdpru,-clwb,+mmx,+sse2,+rdseed,-avx512vbmi2,-prefetchi,-rdpid,-fma4,-avx512vbmi,-shstk,-vaes,-waitpkg,-sgx,+fxsr,-avx512dq,-sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 32 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_local
  util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
    %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
    %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
    %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
    %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
    %5 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
    %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    %8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
    util.return %9 : !hal.buffer_view
  }
}
// -----// IR Dump After ResolveDeviceAliasesPass (iree-hal-resolve-device-aliases) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "broadwell", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,-avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,+invpcid,+64bit,-xsavec,-avx10.1-512,-avx512vpopcntdq,+cmov,-avx512vp2intersect,-avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,+rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-vpclmulqdq,-avx512vl,-uintr,-cf,-clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,-gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,-avx512f,-amx-bf16,-avx512bf16,-avx512vnni,-push2pop2,+cx8,-avx512bw,+sse3,-pku,-nf,+fsgsbase,-clzero,-mwaitx,-lwp,+lzcnt,-sha,-movdir64b,-ppx,-wbnoinvd,-enqcmd,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,-avx512bitalg,-rdpru,-clwb,+mmx,+sse2,+rdseed,-avx512vbmi2,-prefetchi,-rdpid,-fma4,-avx512vbmi,-shstk,-vaes,-waitpkg,-sgx,+fxsr,-avx512dq,-sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 32 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_local
  util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
    %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
    %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
    %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
    %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
    %5 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
    %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    %8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
    util.return %9 : !hal.buffer_view
  }
}
// -----// IR Dump After VerifyDevicesPass (iree-hal-verify-devices) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "broadwell", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,-avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,+invpcid,+64bit,-xsavec,-avx10.1-512,-avx512vpopcntdq,+cmov,-avx512vp2intersect,-avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,+rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-vpclmulqdq,-avx512vl,-uintr,-cf,-clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,-gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,-avx512f,-amx-bf16,-avx512bf16,-avx512vnni,-push2pop2,+cx8,-avx512bw,+sse3,-pku,-nf,+fsgsbase,-clzero,-mwaitx,-lwp,+lzcnt,-sha,-movdir64b,-ppx,-wbnoinvd,-enqcmd,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,-avx512bitalg,-rdpru,-clwb,+mmx,+sse2,+rdseed,-avx512vbmi2,-prefetchi,-rdpid,-fma4,-avx512vbmi,-shstk,-vaes,-waitpkg,-sgx,+fxsr,-avx512dq,-sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 32 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_local
  util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
    %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
    %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
    %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
    %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
    %5 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
    %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    %8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
    util.return %9 : !hal.buffer_view
  }
}
// -----// IR Dump After RemoveZeroExtentTensorsPass (iree-global-opt-remove-zero-extent-tensors) //----- //
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
  %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
  %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
  %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
  util.return %9 : !hal.buffer_view
}
// -----// IR Dump After DetachElementwiseFromNamedOpsPass (iree-global-opt-detach-elementwise-from-named-ops) //----- //
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
  %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
  %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
  %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
  util.return %9 : !hal.buffer_view
}
// -----// IR Dump After LinalgNamedOpConversionPass (linalg-named-op-conversion) //----- //
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
  %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
  %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
  %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
  util.return %9 : !hal.buffer_view
}
// -----// IR Dump After Convert1X1FilterConv2DToMatmulPass (iree-global-opt-convert-1x1-filter-conv2d-to-matmul) //----- //
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
  %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
  %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
  %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
  util.return %9 : !hal.buffer_view
}
// -----// IR Dump After EraseUnusedLinalgOperandsPass (iree-global-opt-erase-unused-linalg-operands) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "broadwell", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,-avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,+invpcid,+64bit,-xsavec,-avx10.1-512,-avx512vpopcntdq,+cmov,-avx512vp2intersect,-avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,+rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-vpclmulqdq,-avx512vl,-uintr,-cf,-clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,-gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,-avx512f,-amx-bf16,-avx512bf16,-avx512vnni,-push2pop2,+cx8,-avx512bw,+sse3,-pku,-nf,+fsgsbase,-clzero,-mwaitx,-lwp,+lzcnt,-sha,-movdir64b,-ppx,-wbnoinvd,-enqcmd,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,-avx512bitalg,-rdpru,-clwb,+mmx,+sse2,+rdseed,-avx512vbmi2,-prefetchi,-rdpid,-fma4,-avx512vbmi,-shstk,-vaes,-waitpkg,-sgx,+fxsr,-avx512dq,-sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 32 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_local
  util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
    %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
    %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
    %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
    %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
    %5 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
    %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    %8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
    util.return %9 : !hal.buffer_view
  }
}
// -----// IR Dump After ExpandTensorShapesPass (iree-global-opt-expand-tensor-shapes) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "broadwell", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,-avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,+invpcid,+64bit,-xsavec,-avx10.1-512,-avx512vpopcntdq,+cmov,-avx512vp2intersect,-avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,+rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-vpclmulqdq,-avx512vl,-uintr,-cf,-clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,-gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,-avx512f,-amx-bf16,-avx512bf16,-avx512vnni,-push2pop2,+cx8,-avx512bw,+sse3,-pku,-nf,+fsgsbase,-clzero,-mwaitx,-lwp,+lzcnt,-sha,-movdir64b,-ppx,-wbnoinvd,-enqcmd,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,-avx512bitalg,-rdpru,-clwb,+mmx,+sse2,+rdseed,-avx512vbmi2,-prefetchi,-rdpid,-fma4,-avx512vbmi,-shstk,-vaes,-waitpkg,-sgx,+fxsr,-avx512dq,-sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 32 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_local
  util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
    %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
    %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
    %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
    %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
    %5 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
    %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    %8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
    util.return %9 : !hal.buffer_view
  }
}
// -----// IR Dump After ConvertElementwiseToLinalgPass (convert-elementwise-to-linalg) //----- //
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
  %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
  %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
  %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
  util.return %9 : !hal.buffer_view
}
// -----// IR Dump After RaiseSpecialOpsPass (iree-global-opt-raise-special-ops) //----- //
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
  %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
  %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
  %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
  util.return %9 : !hal.buffer_view
}
// -----// IR Dump After DecomposeConcatPass (iree-global-opt-decompose-concat) //----- //
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
  %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
  %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
  %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
  util.return %9 : !hal.buffer_view
}
// -----// IR Dump After GeneralizeLinalgNamedOpsPass (iree-global-opt-generalize-linalg-named-ops) //----- //
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
  %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
  %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
  %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
  util.return %9 : !hal.buffer_view
}
// -----// IR Dump After FoldUnitExtentDimsPass (iree-flow-fold-unit-extent-dims) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "broadwell", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,-avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,+invpcid,+64bit,-xsavec,-avx10.1-512,-avx512vpopcntdq,+cmov,-avx512vp2intersect,-avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,+rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-vpclmulqdq,-avx512vl,-uintr,-cf,-clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,-gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,-avx512f,-amx-bf16,-avx512bf16,-avx512vnni,-push2pop2,+cx8,-avx512bw,+sse3,-pku,-nf,+fsgsbase,-clzero,-mwaitx,-lwp,+lzcnt,-sha,-movdir64b,-ppx,-wbnoinvd,-enqcmd,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,-avx512bitalg,-rdpru,-clwb,+mmx,+sse2,+rdseed,-avx512vbmi2,-prefetchi,-rdpid,-fma4,-avx512vbmi,-shstk,-vaes,-waitpkg,-sgx,+fxsr,-avx512dq,-sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 32 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_local
  util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
    %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
    %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
    %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
    %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
    %5 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
    %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    %8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
    util.return %9 : !hal.buffer_view
  }
}
// -----// IR Dump After DemoteContractionInputsToBF16Pass (iree-global-opt-demote-contraction-inputs-to-bf16) //----- //
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
  %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
  %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
  %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
  util.return %9 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
  %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
  %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
  %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
  util.return %9 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
  %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
  %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
  %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
  util.return %9 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
  %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
  %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
  %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
  util.return %9 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
  %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
  %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
  %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
  util.return %9 : !hal.buffer_view
}
// -----// IR Dump After GeneralizeLinalgNamedOpsPass (iree-global-opt-generalize-linalg-named-ops) //----- //
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
  %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
  %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
  %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
  util.return %9 : !hal.buffer_view
}
// -----// IR Dump After GlobalLoopInvariantCodeMotionPass (iree-global-opt-loop-invariant-code-motion) //----- //
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
  %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
  %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
  %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
  util.return %9 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
  %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
  %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
  %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
  util.return %9 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
  %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
  %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
  %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
  util.return %9 : !hal.buffer_view
}
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
  %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
  %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
  %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
  util.return %9 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "broadwell", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,-avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,+invpcid,+64bit,-xsavec,-avx10.1-512,-avx512vpopcntdq,+cmov,-avx512vp2intersect,-avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,+rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-vpclmulqdq,-avx512vl,-uintr,-cf,-clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,-gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,-avx512f,-amx-bf16,-avx512bf16,-avx512vnni,-push2pop2,+cx8,-avx512bw,+sse3,-pku,-nf,+fsgsbase,-clzero,-mwaitx,-lwp,+lzcnt,-sha,-movdir64b,-ppx,-wbnoinvd,-enqcmd,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,-avx512bitalg,-rdpru,-clwb,+mmx,+sse2,+rdseed,-avx512vbmi2,-prefetchi,-rdpid,-fma4,-avx512vbmi,-shstk,-vaes,-waitpkg,-sgx,+fxsr,-avx512dq,-sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 32 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_local
  util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
    %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
    %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
    %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
    %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
    %5 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
    %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    %8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
    util.return %9 : !hal.buffer_view
  }
}
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "broadwell", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,-avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,+invpcid,+64bit,-xsavec,-avx10.1-512,-avx512vpopcntdq,+cmov,-avx512vp2intersect,-avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,+rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-vpclmulqdq,-avx512vl,-uintr,-cf,-clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,-gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,-avx512f,-amx-bf16,-avx512bf16,-avx512vnni,-push2pop2,+cx8,-avx512bw,+sse3,-pku,-nf,+fsgsbase,-clzero,-mwaitx,-lwp,+lzcnt,-sha,-movdir64b,-ppx,-wbnoinvd,-enqcmd,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,-avx512bitalg,-rdpru,-clwb,+mmx,+sse2,+rdseed,-avx512vbmi2,-prefetchi,-rdpid,-fma4,-avx512vbmi,-shstk,-vaes,-waitpkg,-sgx,+fxsr,-avx512dq,-sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 32 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_local
  util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
    %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
    %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
    %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
    %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
    %5 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
    %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    %8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
    util.return %9 : !hal.buffer_view
  }
}
// -----// IR Dump After IPO (iree-util-ipo) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "broadwell", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,-avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,+invpcid,+64bit,-xsavec,-avx10.1-512,-avx512vpopcntdq,+cmov,-avx512vp2intersect,-avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,+rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-vpclmulqdq,-avx512vl,-uintr,-cf,-clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,-gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,-avx512f,-amx-bf16,-avx512bf16,-avx512vnni,-push2pop2,+cx8,-avx512bw,+sse3,-pku,-nf,+fsgsbase,-clzero,-mwaitx,-lwp,+lzcnt,-sha,-movdir64b,-ppx,-wbnoinvd,-enqcmd,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,-avx512bitalg,-rdpru,-clwb,+mmx,+sse2,+rdseed,-avx512vbmi2,-prefetchi,-rdpid,-fma4,-avx512vbmi,-shstk,-vaes,-waitpkg,-sgx,+fxsr,-avx512dq,-sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 32 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_local
  util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
    %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
    %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
    %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
    %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
    %5 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
    %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    %8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
    util.return %9 : !hal.buffer_view
  }
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "broadwell", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,-avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,+invpcid,+64bit,-xsavec,-avx10.1-512,-avx512vpopcntdq,+cmov,-avx512vp2intersect,-avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,+rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-vpclmulqdq,-avx512vl,-uintr,-cf,-clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,-gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,-avx512f,-amx-bf16,-avx512bf16,-avx512vnni,-push2pop2,+cx8,-avx512bw,+sse3,-pku,-nf,+fsgsbase,-clzero,-mwaitx,-lwp,+lzcnt,-sha,-movdir64b,-ppx,-wbnoinvd,-enqcmd,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,-avx512bitalg,-rdpru,-clwb,+mmx,+sse2,+rdseed,-avx512vbmi2,-prefetchi,-rdpid,-fma4,-avx512vbmi,-shstk,-vaes,-waitpkg,-sgx,+fxsr,-avx512dq,-sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 32 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_local
  util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
    %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
    %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
    %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
    %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
    %5 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
    %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    %8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
    util.return %9 : !hal.buffer_view
  }
}
// -----// IR Dump After CSE (cse) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "broadwell", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,-avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,+invpcid,+64bit,-xsavec,-avx10.1-512,-avx512vpopcntdq,+cmov,-avx512vp2intersect,-avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,+rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-vpclmulqdq,-avx512vl,-uintr,-cf,-clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,-gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,-avx512f,-amx-bf16,-avx512bf16,-avx512vnni,-push2pop2,+cx8,-avx512bw,+sse3,-pku,-nf,+fsgsbase,-clzero,-mwaitx,-lwp,+lzcnt,-sha,-movdir64b,-ppx,-wbnoinvd,-enqcmd,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,-avx512bitalg,-rdpru,-clwb,+mmx,+sse2,+rdseed,-avx512vbmi2,-prefetchi,-rdpid,-fma4,-avx512vbmi,-shstk,-vaes,-waitpkg,-sgx,+fxsr,-avx512dq,-sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 32 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_local
  util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
    %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
    %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
    %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
    %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
    %5 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
    %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    %8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
    util.return %9 : !hal.buffer_view
  }
}
// -----// IR Dump After HoistIntoGlobals (iree-util-hoist-into-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "broadwell", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,-avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,+invpcid,+64bit,-xsavec,-avx10.1-512,-avx512vpopcntdq,+cmov,-avx512vp2intersect,-avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,+rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-vpclmulqdq,-avx512vl,-uintr,-cf,-clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,-gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,-avx512f,-amx-bf16,-avx512bf16,-avx512vnni,-push2pop2,+cx8,-avx512bw,+sse3,-pku,-nf,+fsgsbase,-clzero,-mwaitx,-lwp,+lzcnt,-sha,-movdir64b,-ppx,-wbnoinvd,-enqcmd,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,-avx512bitalg,-rdpru,-clwb,+mmx,+sse2,+rdseed,-avx512vbmi2,-prefetchi,-rdpid,-fma4,-avx512vbmi,-shstk,-vaes,-waitpkg,-sgx,+fxsr,-avx512dq,-sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 32 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_local
  util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
    %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
    %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
    %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
    %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
    %5 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
    %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    %8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
    util.return %9 : !hal.buffer_view
  }
}
// -----// IR Dump After JitGlobals (iree-consteval-jit-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "broadwell", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,-avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,+invpcid,+64bit,-xsavec,-avx10.1-512,-avx512vpopcntdq,+cmov,-avx512vp2intersect,-avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,+rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-vpclmulqdq,-avx512vl,-uintr,-cf,-clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,-gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,-avx512f,-amx-bf16,-avx512bf16,-avx512vnni,-push2pop2,+cx8,-avx512bw,+sse3,-pku,-nf,+fsgsbase,-clzero,-mwaitx,-lwp,+lzcnt,-sha,-movdir64b,-ppx,-wbnoinvd,-enqcmd,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,-avx512bitalg,-rdpru,-clwb,+mmx,+sse2,+rdseed,-avx512vbmi2,-prefetchi,-rdpid,-fma4,-avx512vbmi,-shstk,-vaes,-waitpkg,-sgx,+fxsr,-avx512dq,-sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 32 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_local
  util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
    %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
    %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
    %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
    %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
    %5 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
    %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    %8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
    util.return %9 : !hal.buffer_view
  }
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
  %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
  %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
  %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
  util.return %9 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
  %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
  %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
  %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
  util.return %9 : !hal.buffer_view
}
// -----// IR Dump After RaiseSpecialOpsPass (iree-global-opt-raise-special-ops) //----- //
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
  %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
  %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
  %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
  util.return %9 : !hal.buffer_view
}
// -----// IR Dump After VerifyInputLegalityPass (iree-verify-input-legality) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "broadwell", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,-avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,+invpcid,+64bit,-xsavec,-avx10.1-512,-avx512vpopcntdq,+cmov,-avx512vp2intersect,-avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,+rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-vpclmulqdq,-avx512vl,-uintr,-cf,-clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,-gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,-avx512f,-amx-bf16,-avx512bf16,-avx512vnni,-push2pop2,+cx8,-avx512bw,+sse3,-pku,-nf,+fsgsbase,-clzero,-mwaitx,-lwp,+lzcnt,-sha,-movdir64b,-ppx,-wbnoinvd,-enqcmd,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,-avx512bitalg,-rdpru,-clwb,+mmx,+sse2,+rdseed,-avx512vbmi2,-prefetchi,-rdpid,-fma4,-avx512vbmi,-shstk,-vaes,-waitpkg,-sgx,+fxsr,-avx512dq,-sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 32 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_local
  util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
    %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
    %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
    %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
    %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
    %5 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
    %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    %8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
    util.return %9 : !hal.buffer_view
  }
}
// -----// IR Dump After InjectTensorTracingPass (iree-flow-inject-tensor-tracing) //----- //
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
  %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
  %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
  %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
  util.return %9 : !hal.buffer_view
}
// -----// IR Dump After TensorPadToTensorInsertSlicePass (iree-flow-tensor-pad-to-tensor-insert-slice) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "broadwell", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,-avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,+invpcid,+64bit,-xsavec,-avx10.1-512,-avx512vpopcntdq,+cmov,-avx512vp2intersect,-avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,+rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-vpclmulqdq,-avx512vl,-uintr,-cf,-clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,-gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,-avx512f,-amx-bf16,-avx512bf16,-avx512vnni,-push2pop2,+cx8,-avx512bw,+sse3,-pku,-nf,+fsgsbase,-clzero,-mwaitx,-lwp,+lzcnt,-sha,-movdir64b,-ppx,-wbnoinvd,-enqcmd,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,-avx512bitalg,-rdpru,-clwb,+mmx,+sse2,+rdseed,-avx512vbmi2,-prefetchi,-rdpid,-fma4,-avx512vbmi,-shstk,-vaes,-waitpkg,-sgx,+fxsr,-avx512dq,-sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 32 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_local
  util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
    %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
    %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
    %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
    %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
    %5 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
    %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    %8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
    util.return %9 : !hal.buffer_view
  }
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- // | |
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} | |
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%0) : tensor<?x8640x3200xf16> | |
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32> | |
%8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32> | |
%9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} | |
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%0) : tensor<?x8640x3200xf16> | |
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32> | |
%8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32> | |
%9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- // | |
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} | |
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%0) : tensor<?x8640x3200xf16> | |
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32> | |
%8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32> | |
%9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "broadwell", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,-avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,+invpcid,+64bit,-xsavec,-avx10.1-512,-avx512vpopcntdq,+cmov,-avx512vp2intersect,-avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,+rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-vpclmulqdq,-avx512vl,-uintr,-cf,-clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,-gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,-avx512f,-amx-bf16,-avx512bf16,-avx512vnni,-push2pop2,+cx8,-avx512bw,+sse3,-pku,-nf,+fsgsbase,-clzero,-mwaitx,-lwp,+lzcnt,-sha,-movdir64b,-ppx,-wbnoinvd,-enqcmd,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,-avx512bitalg,-rdpru,-clwb,+mmx,+sse2,+rdseed,-avx512vbmi2,-prefetchi,-rdpid,-fma4,-avx512vbmi,-shstk,-vaes,-waitpkg,-sgx,+fxsr,-avx512dq,-sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 32 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_local
  util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
    %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
    %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
    %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
    %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
    %5 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
    %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    %8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
    util.return %9 : !hal.buffer_view
  }
}
// (editor: the next three dumps are byte-identical to the ApplyPatterns dump above; only the pass headers are kept)
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- //
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- //
// -----// IR Dump After IPO (iree-util-ipo) //----- //
// -----// IR Dump After FixedPointIterator (iree-util-fixed-point-iterator) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "broadwell", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,-avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,+invpcid,+64bit,-xsavec,-avx10.1-512,-avx512vpopcntdq,+cmov,-avx512vp2intersect,-avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,+rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-vpclmulqdq,-avx512vl,-uintr,-cf,-clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,-gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,-avx512f,-amx-bf16,-avx512bf16,-avx512vnni,-push2pop2,+cx8,-avx512bw,+sse3,-pku,-nf,+fsgsbase,-clzero,-mwaitx,-lwp,+lzcnt,-sha,-movdir64b,-ppx,-wbnoinvd,-enqcmd,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,-avx512bitalg,-rdpru,-clwb,+mmx,+sse2,+rdseed,-avx512vbmi2,-prefetchi,-rdpid,-fma4,-avx512vbmi,-shstk,-vaes,-waitpkg,-sgx,+fxsr,-avx512dq,-sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 32 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_local
  util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
    %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
    %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
    %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
    %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
    %5 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
    %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    %8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
    util.return %9 : !hal.buffer_view
  }
}
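// NOTE: the iree.fixedpoint.iteration = 0 module attribute seen in the dumps above is
// gone here, suggesting the util-level cleanup passes reached a fixed point after a
// single iteration.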
// -----// IR Dump After FusionPreprocessingPass (iree-flow-fusion-preprocessing) //----- //
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
  %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
  %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
  %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
  util.return %9 : !hal.buffer_view
}
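// NOTE: throughout the flow-level fusion passes that follow, the f16 broadcast
// (linalg.generic) is never fused into linalg.batch_matmul_transpose_b; the two ops
// stay separate until dispatch-region formation.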
// (editor: the next twenty dumps are byte-identical to the FusionPreprocessingPass dump above; only the pass headers are kept)
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
// -----// IR Dump After CSE (cse) //----- //
// -----// IR Dump After ElementwiseOpFusionPass (iree-flow-elementwise-op-fusion) //----- //
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
// -----// IR Dump After CSE (cse) //----- //
// -----// IR Dump After BubbleUpExpandShapesPass (iree-flow-bubble-up-expand-shapes) //----- //
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
// -----// IR Dump After CSE (cse) //----- //
// -----// IR Dump After ElementwiseOpFusionPass (iree-flow-elementwise-op-fusion) //----- //
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
// -----// IR Dump After CSE (cse) //----- //
// -----// IR Dump After SinkReshapesPass (iree-flow-sink-reshapes) //----- //
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
// -----// IR Dump After CSE (cse) //----- //
// -----// IR Dump After FuseMultiUseElementwiseProducerPass (iree-flow-fuse-multi-use-elementwise-producer) //----- //
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
// -----// IR Dump After CSE (cse) //----- //
// -----// IR Dump After SplitReductionPass (iree-flow-split-reduction-ops) //----- //
// -----// IR Dump After TransposeGenericOpsPass (iree-flow-transpose-generic-ops) //----- //
// -----// IR Dump After FormScalarDispatchesPass (iree-flow-form-scalar-dispatches) //----- //
// -----// IR Dump After FormDispatchRegionsPass (iree-flow-form-dispatch-regions) //----- //
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
  %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
  %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %5 = flow.dispatch.region -> (tensor<?x8640x3200xf16>{%0}) {
    %10 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    flow.return %10 : tensor<?x8640x3200xf16>
  }
  %6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
  %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %8 = flow.dispatch.region -> (tensor<?x?x8640xf32>{%0, %1}) {
    %10 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    flow.return %10 : tensor<?x?x8640xf32>
  }
  %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
  util.return %9 : !hal.buffer_view
}
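// NOTE: FormDispatchRegionsPass has split the program into two dispatches: one for the
// f16 broadcast of the 8640x3200 weight and one for the zero-filled
// batch_matmul_transpose_b that consumes it.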
// -----// IR Dump After CloneProducersIntoDispatchRegionsPass (iree-flow-clone-producers-into-dispatch-regions) //----- //
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
  %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
  %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %5 = flow.dispatch.region -> (tensor<?x8640x3200xf16>{%0}) {
    %10 = tensor.empty(%0) : tensor<?x8640x3200xf16>
    %11 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%10 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    flow.return %11 : tensor<?x8640x3200xf16>
  }
  %6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
  %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %8 = flow.dispatch.region -> (tensor<?x?x8640xf32>{%0, %1}) {
    %10 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
    %cst_0 = arith.constant 0.000000e+00 : f32
    %11 = linalg.fill ins(%cst_0 : f32) outs(%10 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    %12 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%11 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    flow.return %12 : tensor<?x?x8640xf32>
  }
  %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
  util.return %9 : !hal.buffer_view
}
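// NOTE: CloneProducersIntoDispatchRegionsPass clones the tensor.empty, arith.constant,
// and linalg.fill producers into the consuming regions so each dispatch is
// self-contained; the originals left outside are now dead and are gone by the
// SetEncodingPass dump below.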
// -----// IR Dump After SetEncodingPass (iree-flow-set-encoding) //----- //
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
  %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
  %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %4 = flow.dispatch.region -> (tensor<?x8640x3200xf16>{%0}) {
    %7 = tensor.empty(%0) : tensor<?x8640x3200xf16>
    %8 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%7 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    flow.return %8 : tensor<?x8640x3200xf16>
  }
  %5 = flow.dispatch.region -> (tensor<?x?x8640xf32>{%0, %1}) {
    %7 = iree_encoding.set_encoding %2 : tensor<?x?x3200xf32> -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>
    %8 = iree_encoding.set_encoding %4 : tensor<?x8640x3200xf16> -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>
    %9 = tensor.empty(%0, %1) : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>
    %10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>
    %11 = linalg.batch_matmul_transpose_b ins(%7, %8 : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>) outs(%10 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>
    %12 = iree_encoding.unset_encoding %11 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> -> tensor<?x?x8640xf32>
    %extracted_slice = tensor.extract_slice %12[0, 0, 0] [%0, %1, 8640] [1, 1, 1] : tensor<?x?x8640xf32> to tensor<?x?x8640xf32>
    flow.return %extracted_slice : tensor<?x?x8640xf32>
  }
  %6 = hal.tensor.export %5 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
  util.return %6 : !hal.buffer_view
}
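// NOTE: SetEncodingPass tags the matmul operands and result with
// #iree_encoding.encoding (round_dims_to = 32) so the backend can later materialize
// packed, data-tiled layouts; the tensor.extract_slice trims the possibly dim-rounded
// result back to the original ?x?x8640 shape.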
// -----// IR Dump After HoistEncodingOpsPass (iree-flow-hoist-encoding-ops) //----- //
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
  %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
  %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %4 = flow.dispatch.region -> (tensor<?x8640x3200xf16>{%0}) {
    %9 = tensor.empty(%0) : tensor<?x8640x3200xf16>
    %10 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%9 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    flow.return %10 : tensor<?x8640x3200xf16>
  }
  %5 = iree_encoding.set_encoding %4 : tensor<?x8640x3200xf16> -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>
  %6 = iree_encoding.set_encoding %2 : tensor<?x?x3200xf32> -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>
  %7 = flow.dispatch.region -> (tensor<?x?x8640xf32>{%0, %1}) {
    %9 = tensor.empty(%0, %1) : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>
    %10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>
    %11 = linalg.batch_matmul_transpose_b ins(%6, %5 : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>) outs(%10 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>
%12 = iree_encoding.unset_encoding %11 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> -> tensor<?x?x8640xf32> | |
%extracted_slice = tensor.extract_slice %12[0, 0, 0] [%0, %1, 8640] [1, 1, 1] : tensor<?x?x8640xf32> to tensor<?x?x8640xf32> | |
flow.return %extracted_slice : tensor<?x?x8640xf32> | |
} | |
%8 = hal.tensor.export %7 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view | |
util.return %8 : !hal.buffer_view | |
} | |
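// NOTE: HoistEncodingOpsPass pulled the two iree_encoding.set_encoding ops out of the matmul
// dispatch region: in the previous dump they lived inside the flow.dispatch.region next to the
// batch_matmul_transpose_b; here they sit at function scope (%5, %6) between the producer
// dispatches and the consumer, so later passes can choose where the encoding work runs.
// Per-pass dumps like this come from MLIR's --mlir-print-ir-after-all on stderr; one plausible
// invocation (flags assumed, not recorded in this gist):
//
//   iree-compile matmul_broad.mlir --iree-hal-target-backends=llvm-cpu \
//       --iree-opt-data-tiling --mlir-print-ir-after-all -o /dev/null 2> ir-dump.txt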
// -----// IR Dump After FuseEncodingOpsIntoDispatchRegionsPass (iree-flow-fuse-encoding-ops-into-dispatch-regions-pass) //----- // | |
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} | |
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%4:2 = flow.dispatch.region -> (tensor<?x8640x3200xf16>{%0}, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0}) { | |
%8 = tensor.empty(%0) : tensor<?x8640x3200xf16> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%8 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%10 = iree_encoding.set_encoding %9 : tensor<?x8640x3200xf16> -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> | |
flow.return %9, %10 : tensor<?x8640x3200xf16>, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> | |
} | |
%5 = flow.dispatch.region -> (tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0, %1}) { | |
%8 = iree_encoding.set_encoding %2 : tensor<?x?x3200xf32> -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> | |
flow.return %8 : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> | |
} | |
%6 = flow.dispatch.region -> (tensor<?x?x8640xf32>{%0, %1}) { | |
%8 = tensor.empty(%0, %1) : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> | |
%9 = linalg.fill ins(%cst : f32) outs(%8 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> | |
%10 = linalg.batch_matmul_transpose_b ins(%5, %4#1 : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>) outs(%9 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> | |
%11 = iree_encoding.unset_encoding %10 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> -> tensor<?x?x8640xf32> | |
%extracted_slice = tensor.extract_slice %11[0, 0, 0] [%0, %1, 8640] [1, 1, 1] : tensor<?x?x8640xf32> to tensor<?x?x8640xf32> | |
flow.return %extracted_slice : tensor<?x?x8640xf32> | |
} | |
%7 = hal.tensor.export %6 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view | |
util.return %7 : !hal.buffer_view | |
} | |
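// NOTE: FuseEncodingOpsIntoDispatchRegionsPass moved the hoisted set_encoding ops back into
// producer dispatch regions: the broadcast region now returns two results (%4:2) -- the plain
// tensor<?x8640x3200xf16> and its encoded copy -- and a new region (%5) wraps the set_encoding
// of the imported LHS, so every encoding now happens inside some dispatch.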
// -----// IR Dump After CollapseDimensionsPass (iree-flow-collapse-dimensions) //----- // | |
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} | |
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%4:2 = flow.dispatch.region -> (tensor<?x8640x3200xf16>{%0}, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0}) { | |
%8 = tensor.empty(%0) : tensor<?x8640x3200xf16> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%8 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%10 = iree_encoding.set_encoding %9 : tensor<?x8640x3200xf16> -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> | |
flow.return %9, %10 : tensor<?x8640x3200xf16>, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> | |
} | |
%5 = flow.dispatch.region -> (tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0, %1}) { | |
%8 = iree_encoding.set_encoding %2 : tensor<?x?x3200xf32> -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> | |
flow.return %8 : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> | |
} | |
%6 = flow.dispatch.region -> (tensor<?x?x8640xf32>{%0, %1}) { | |
%8 = tensor.empty(%0, %1) : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> | |
%9 = linalg.fill ins(%cst : f32) outs(%8 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> | |
%10 = linalg.batch_matmul_transpose_b ins(%5, %4#1 : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>) outs(%9 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> | |
%11 = iree_encoding.unset_encoding %10 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> -> tensor<?x?x8640xf32> | |
%extracted_slice = tensor.extract_slice %11[0, 0, 0] [%0, %1, 8640] [1, 1, 1] : tensor<?x?x8640xf32> to tensor<?x?x8640xf32> | |
flow.return %extracted_slice : tensor<?x?x8640xf32> | |
} | |
%7 = hal.tensor.export %6 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view | |
util.return %7 : !hal.buffer_view | |
} | |
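// NOTE: CollapseDimensionsPass left the IR unchanged (this dump is identical to the previous
// one), likely because there are no adjacent parallel dimensions here that could legally be
// collapsed across the encoded matmul operands.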
// -----// IR Dump After ConvertDispatchRegionsToWorkgroupsPass (iree-flow-convert-dispatch-regions-to-workgroups) //----- // | |
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} | |
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%4:2 = flow.dispatch.workgroups(%0, %3, %0, %0) : (index, tensor<8640x3200xf16>, index, index) -> (tensor<?x8640x3200xf16>{%0}, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0}) = | |
(%arg2: index, %arg3: !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>>, %arg4: index, %arg5: index, %arg6: !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16>>, %arg7: !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>) { | |
%8 = flow.dispatch.tensor.load %arg3, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%9 = tensor.empty(%arg5) : tensor<?x8640x3200xf16> | |
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%8 : tensor<8640x3200xf16>) outs(%9 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%11 = iree_encoding.set_encoding %10 : tensor<?x8640x3200xf16> -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> | |
flow.dispatch.tensor.store %10, %arg6, offsets = [0, 0, 0], sizes = [%arg5, 8640, 3200], strides = [1, 1, 1] : tensor<?x8640x3200xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16>>{%arg5} | |
flow.dispatch.tensor.store %11, %arg7, offsets = [0, 0, 0], sizes = [%arg5, 8640, 3200], strides = [1, 1, 1] : tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>{%arg5} | |
flow.return | |
} | |
%5 = flow.dispatch.workgroups(%2, %0, %1) : (tensor<?x?x3200xf32>{%0, %1}, index, index) -> %2 as tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0, %1} = | |
(%arg2: !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>, %arg3: index, %arg4: index) { | |
%8 = flow.dispatch.tensor.load %arg2, offsets = [0, 0, 0], sizes = [%arg3, %arg4, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%arg3, %arg4} -> tensor<?x?x3200xf32> | |
%9 = iree_encoding.set_encoding %8 : tensor<?x?x3200xf32> -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> | |
flow.dispatch.tensor.store %9, %arg2, offsets = [0, 0, 0], sizes = [%arg3, %arg4, 3200], strides = [1, 1, 1] : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%arg3, %arg4} | |
flow.return | |
} | |
%6 = flow.dispatch.workgroups(%0, %1, %cst, %5, %4#1, %0, %1, %0, %0, %1) : (index, index, f32, tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0, %1}, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0}, index, index, index, index, index) -> tensor<?x?x8640xf32>{%0, %1} = | |
(%arg2: index, %arg3: index, %arg4: f32, %arg5: !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>, %arg6: !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>, %arg7: index, %arg8: index, %arg9: index, %arg10: index, %arg11: index, %arg12: !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>) { | |
%8 = flow.dispatch.tensor.load %arg5, offsets = [0, 0, 0], sizes = [%arg10, %arg11, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>{%arg10, %arg11} -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> | |
%9 = flow.dispatch.tensor.load %arg6, offsets = [0, 0, 0], sizes = [%arg10, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>{%arg10} -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> | |
%10 = tensor.empty(%arg10, %arg11) : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> | |
%11 = linalg.fill ins(%arg4 : f32) outs(%10 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> | |
%12 = linalg.batch_matmul_transpose_b ins(%8, %9 : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>) outs(%11 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> | |
%13 = iree_encoding.unset_encoding %12 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> -> tensor<?x?x8640xf32> | |
%extracted_slice = tensor.extract_slice %13[0, 0, 0] [%arg10, %arg11, 8640] [1, 1, 1] : tensor<?x?x8640xf32> to tensor<?x?x8640xf32> | |
flow.dispatch.tensor.store %extracted_slice, %arg12, offsets = [0, 0, 0], sizes = [%arg10, %arg11, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%arg10, %arg11} | |
flow.return | |
} | |
%7 = hal.tensor.export %6 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view | |
util.return %7 : !hal.buffer_view | |
} | |
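// NOTE: ConvertDispatchRegionsToWorkgroupsPass rewrote each flow.dispatch.region into a
// flow.dispatch.workgroups op: captured SSA values become explicit operands, tensors become
// !flow.dispatch.tensor<readonly/writeonly/readwrite:...> bindings accessed through
// flow.dispatch.tensor.load/store, and results tie their dynamic extents to captured indices
// (e.g. ...{%0, %1}). The redundant %0/%1 captures and the dead un-encoded result of %4 are
// both cleaned up by the canonicalizer two dumps below; the set_encoding of %2 uses a
// readwrite binding, i.e. it rewrites the imported tensor's storage in place.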
// -----// IR Dump After ConvertTensorToFlowPass (iree-flow-convert-tensor-to-flow) //----- // | |
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} | |
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%4:2 = flow.dispatch.workgroups(%0, %3, %0, %0) : (index, tensor<8640x3200xf16>, index, index) -> (tensor<?x8640x3200xf16>{%0}, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0}) = | |
(%arg2: index, %arg3: !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>>, %arg4: index, %arg5: index, %arg6: !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16>>, %arg7: !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>) { | |
%8 = flow.dispatch.tensor.load %arg3, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%9 = tensor.empty(%arg5) : tensor<?x8640x3200xf16> | |
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%8 : tensor<8640x3200xf16>) outs(%9 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%11 = iree_encoding.set_encoding %10 : tensor<?x8640x3200xf16> -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> | |
flow.dispatch.tensor.store %10, %arg6, offsets = [0, 0, 0], sizes = [%arg5, 8640, 3200], strides = [1, 1, 1] : tensor<?x8640x3200xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16>>{%arg5} | |
flow.dispatch.tensor.store %11, %arg7, offsets = [0, 0, 0], sizes = [%arg5, 8640, 3200], strides = [1, 1, 1] : tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>{%arg5} | |
flow.return | |
} | |
%5 = flow.dispatch.workgroups(%2, %0, %1) : (tensor<?x?x3200xf32>{%0, %1}, index, index) -> %2 as tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0, %1} = | |
(%arg2: !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>, %arg3: index, %arg4: index) { | |
%8 = flow.dispatch.tensor.load %arg2, offsets = [0, 0, 0], sizes = [%arg3, %arg4, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%arg3, %arg4} -> tensor<?x?x3200xf32> | |
%9 = iree_encoding.set_encoding %8 : tensor<?x?x3200xf32> -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> | |
flow.dispatch.tensor.store %9, %arg2, offsets = [0, 0, 0], sizes = [%arg3, %arg4, 3200], strides = [1, 1, 1] : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%arg3, %arg4} | |
flow.return | |
} | |
%6 = flow.dispatch.workgroups(%0, %1, %cst, %5, %4#1, %0, %1, %0, %0, %1) : (index, index, f32, tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0, %1}, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0}, index, index, index, index, index) -> tensor<?x?x8640xf32>{%0, %1} = | |
(%arg2: index, %arg3: index, %arg4: f32, %arg5: !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>, %arg6: !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>, %arg7: index, %arg8: index, %arg9: index, %arg10: index, %arg11: index, %arg12: !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>) { | |
%8 = flow.dispatch.tensor.load %arg5, offsets = [0, 0, 0], sizes = [%arg10, %arg11, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>{%arg10, %arg11} -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> | |
%9 = flow.dispatch.tensor.load %arg6, offsets = [0, 0, 0], sizes = [%arg10, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>{%arg10} -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> | |
%10 = tensor.empty(%arg10, %arg11) : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> | |
%11 = linalg.fill ins(%arg4 : f32) outs(%10 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> | |
%12 = linalg.batch_matmul_transpose_b ins(%8, %9 : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>) outs(%11 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> | |
%13 = iree_encoding.unset_encoding %12 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> -> tensor<?x?x8640xf32> | |
%extracted_slice = tensor.extract_slice %13[0, 0, 0] [%arg10, %arg11, 8640] [1, 1, 1] : tensor<?x?x8640xf32> to tensor<?x?x8640xf32> | |
flow.dispatch.tensor.store %extracted_slice, %arg12, offsets = [0, 0, 0], sizes = [%arg10, %arg11, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%arg10, %arg11} | |
flow.return | |
} | |
%7 = hal.tensor.export %6 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view | |
util.return %7 : !hal.buffer_view | |
} | |
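// NOTE: ConvertTensorToFlowPass made no changes to this function (the dump matches the
// previous one): all tensor ops already live inside dispatch.workgroups regions, so nothing
// remained at function scope to convert into flow ops.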
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- // | |
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} | |
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%4 = flow.dispatch.workgroups(%3, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0} = | |
(%arg2: !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>>, %arg3: index, %arg4: !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>) { | |
%8 = flow.dispatch.tensor.load %arg2, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%9 = tensor.empty(%arg3) : tensor<?x8640x3200xf16> | |
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%8 : tensor<8640x3200xf16>) outs(%9 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%11 = iree_encoding.set_encoding %10 : tensor<?x8640x3200xf16> -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> | |
flow.dispatch.tensor.store %11, %arg4, offsets = [0, 0, 0], sizes = [%arg3, 8640, 3200], strides = [1, 1, 1] : tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>{%arg3} | |
flow.return | |
} | |
%5 = flow.dispatch.workgroups(%2, %0, %1) : (tensor<?x?x3200xf32>{%0, %1}, index, index) -> %2 as tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0, %1} = | |
(%arg2: !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>, %arg3: index, %arg4: index) { | |
%8 = flow.dispatch.tensor.load %arg2, offsets = [0, 0, 0], sizes = [%arg3, %arg4, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%arg3, %arg4} -> tensor<?x?x3200xf32> | |
%9 = iree_encoding.set_encoding %8 : tensor<?x?x3200xf32> -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> | |
flow.dispatch.tensor.store %9, %arg2, offsets = [0, 0, 0], sizes = [%arg3, %arg4, 3200], strides = [1, 1, 1] : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%arg3, %arg4} | |
flow.return | |
} | |
%6 = flow.dispatch.workgroups(%cst, %5, %4, %0, %1) : (f32, tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0, %1}, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0}, index, index) -> tensor<?x?x8640xf32>{%0, %1} = | |
(%arg2: f32, %arg3: !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>, %arg4: !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>, %arg5: index, %arg6: index, %arg7: !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>) { | |
%8 = flow.dispatch.tensor.load %arg3, offsets = [0, 0, 0], sizes = [%arg5, %arg6, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>{%arg5, %arg6} -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> | |
    %9 = flow.dispatch.tensor.load %arg4, offsets = [0, 0, 0], sizes = [%arg5, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>{%arg5} -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>
    %10 = tensor.empty(%arg5, %arg6) : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>
    %11 = linalg.fill ins(%arg2 : f32) outs(%10 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>
    %12 = linalg.batch_matmul_transpose_b ins(%8, %9 : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>) outs(%11 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>
    %13 = iree_encoding.unset_encoding %12 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> -> tensor<?x?x8640xf32>
    %extracted_slice = tensor.extract_slice %13[0, 0, 0] [%arg5, %arg6, 8640] [1, 1, 1] : tensor<?x?x8640xf32> to tensor<?x?x8640xf32>
    flow.dispatch.tensor.store %extracted_slice, %arg7, offsets = [0, 0, 0], sizes = [%arg5, %arg6, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%arg5, %arg6}
    flow.return
  }
  %7 = hal.tensor.export %6 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
  util.return %7 : !hal.buffer_view
}
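// [editor's note] In the dump above, each matmul operand now carries an
// #iree_encoding.encoding attribute recording its role (operand_index 0 = LHS,
// 1 = RHS, 2 = result), the mixed element types [f32, f16, f32], the original
// tensor type, and the batch-matmul indexing maps. A reduced sketch of the
// attribute, with the repeated fields elided:
//   #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul,
//       element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>,
//       user_indexing_maps = [...], round_dims_to = array<i64: 32, 32, 32>>
// round_dims_to presumably permits the backend to pad each dimension up to a
// multiple of 32 when it later picks a concrete tiled layout.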
// -----// IR Dump After MaterializeDefaultWorkgroupCountRegionPass (iree-flow-materialize-default-workgroup-count-region) //----- //
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
  %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
  %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %4 = flow.dispatch.workgroups[%0](%3, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0} =
      (%arg2: !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>>, %arg3: index, %arg4: !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>) {
    %8 = flow.dispatch.workload.ordinal %arg3, 0 : index
    %9 = flow.dispatch.tensor.load %arg2, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16>
    %10 = tensor.empty(%8) : tensor<?x8640x3200xf16>
    %11 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%9 : tensor<8640x3200xf16>) outs(%10 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %12 = iree_encoding.set_encoding %11 : tensor<?x8640x3200xf16> -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>
    flow.dispatch.tensor.store %12, %arg4, offsets = [0, 0, 0], sizes = [%8, 8640, 3200], strides = [1, 1, 1] : tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>{%8}
    flow.return
  } count(%arg2: index) -> (index, index, index) {
    %x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg2
    flow.return %x, %y, %z : index, index, index
  }
  %5 = flow.dispatch.workgroups[%0, %1](%2, %0, %1) : (tensor<?x?x3200xf32>{%0, %1}, index, index) -> %2 as tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0, %1} =
      (%arg2: !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>, %arg3: index, %arg4: index) {
    %8 = flow.dispatch.workload.ordinal %arg3, 0 : index
    %9 = flow.dispatch.workload.ordinal %arg4, 1 : index
    %10 = flow.dispatch.tensor.load %arg2, offsets = [0, 0, 0], sizes = [%8, %9, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%8, %9} -> tensor<?x?x3200xf32>
    %11 = iree_encoding.set_encoding %10 : tensor<?x?x3200xf32> -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>
    flow.dispatch.tensor.store %11, %arg2, offsets = [0, 0, 0], sizes = [%8, %9, 3200], strides = [1, 1, 1] : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%8, %9}
    flow.return
  } count(%arg2: index, %arg3: index) -> (index, index, index) {
    %x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg2, %arg3
    flow.return %x, %y, %z : index, index, index
  }
  %6 = flow.dispatch.workgroups[%0, %1](%cst, %5, %4, %0, %1) : (f32, tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0, %1}, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0}, index, index) -> tensor<?x?x8640xf32>{%0, %1} =
      (%arg2: f32, %arg3: !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>, %arg4: !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>, %arg5: index, %arg6: index, %arg7: !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>) {
    %8 = flow.dispatch.workload.ordinal %arg5, 0 : index
    %9 = flow.dispatch.workload.ordinal %arg6, 1 : index
    %10 = flow.dispatch.tensor.load %arg3, offsets = [0, 0, 0], sizes = [%8, %9, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>{%8, %9} -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>
    %11 = flow.dispatch.tensor.load %arg4, offsets = [0, 0, 0], sizes = [%8, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>{%8} -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>
    %12 = tensor.empty(%8, %9) : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>
    %13 = linalg.fill ins(%arg2 : f32) outs(%12 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>
    %14 = linalg.batch_matmul_transpose_b ins(%10, %11 : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>) outs(%13 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>
    %15 = iree_encoding.unset_encoding %14 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> -> tensor<?x?x8640xf32>
    %extracted_slice = tensor.extract_slice %15[0, 0, 0] [%8, %9, 8640] [1, 1, 1] : tensor<?x?x8640xf32> to tensor<?x?x8640xf32>
    flow.dispatch.tensor.store %extracted_slice, %arg7, offsets = [0, 0, 0], sizes = [%8, %9, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%8, %9}
    flow.return
  } count(%arg2: index, %arg3: index) -> (index, index, index) {
    %x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg2, %arg3
    flow.return %x, %y, %z : index, index, index
  }
  %7 = hal.tensor.export %6 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
  util.return %7 : !hal.buffer_view
}
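// [editor's note] Relative to the preceding dump, this pass appended a default
// `count` region to every flow.dispatch.workgroups op; the workgroup count is
// derived from the workload with flow.dispatch.workgroup_count_from_slice. The
// materialized pattern, reduced from the dump above (workload names are
// illustrative):
//   } count(%workload0: index, %workload1: index) -> (index, index, index) {
//     %x, %y, %z = flow.dispatch.workgroup_count_from_slice %workload0, %workload1
//     flow.return %x, %y, %z : index, index, index
//   }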
// -----// IR Dump After CaptureDynamicDimsPass (iree-flow-capture-dynamic-dims) //----- //
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
  %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
  %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %4 = flow.dispatch.workgroups[%0](%3, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0} =
      (%arg2: !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>>, %arg3: index, %arg4: !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>) {
    %8 = flow.dispatch.tie_shape %arg4 : !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>{%arg3}
    %9 = flow.dispatch.workload.ordinal %arg3, 0 : index
    %10 = flow.dispatch.tensor.load %arg2, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16>
    %11 = tensor.empty(%9) : tensor<?x8640x3200xf16>
    %12 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%10 : tensor<8640x3200xf16>) outs(%11 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %13 = iree_encoding.set_encoding %12 : tensor<?x8640x3200xf16> -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>
    flow.dispatch.tensor.store %13, %8, offsets = [0, 0, 0], sizes = [%9, 8640, 3200], strides = [1, 1, 1] : tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>{%9}
    flow.return
  } count(%arg2: index) -> (index, index, index) {
    %x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg2
    flow.return %x, %y, %z : index, index, index
  }
  %5 = flow.dispatch.workgroups[%0, %1](%2, %0, %1) : (tensor<?x?x3200xf32>{%0, %1}, index, index) -> %2 as tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0, %1} =
      (%arg2: !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>, %arg3: index, %arg4: index) {
    %8 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%arg3, %arg4}
    %9 = flow.dispatch.workload.ordinal %arg3, 0 : index
    %10 = flow.dispatch.workload.ordinal %arg4, 1 : index
    %11 = flow.dispatch.tensor.load %8, offsets = [0, 0, 0], sizes = [%9, %10, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%9, %10} -> tensor<?x?x3200xf32>
    %12 = iree_encoding.set_encoding %11 : tensor<?x?x3200xf32> -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>
    flow.dispatch.tensor.store %12, %8, offsets = [0, 0, 0], sizes = [%9, %10, 3200], strides = [1, 1, 1] : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%9, %10}
    flow.return
  } count(%arg2: index, %arg3: index) -> (index, index, index) {
    %x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg2, %arg3
    flow.return %x, %y, %z : index, index, index
  }
  %6 = flow.dispatch.workgroups[%0, %1](%cst, %5, %4, %0, %1) : (f32, tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0, %1}, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0}, index, index) -> tensor<?x?x8640xf32>{%0, %1} =
      (%arg2: f32, %arg3: !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>, %arg4: !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>, %arg5: index, %arg6: index, %arg7: !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>) {
    %8 = flow.dispatch.tie_shape %arg3 : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>{%arg5, %arg6}
    %9 = flow.dispatch.tie_shape %arg4 : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>{%arg5}
    %10 = flow.dispatch.tie_shape %arg7 : !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%arg5, %arg6}
    %11 = flow.dispatch.workload.ordinal %arg5, 0 : index
    %12 = flow.dispatch.workload.ordinal %arg6, 1 : index
    %13 = flow.dispatch.tensor.load %8, offsets = [0, 0, 0], sizes = [%11, %12, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>{%11, %12} -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>
    %14 = flow.dispatch.tensor.load %9, offsets = [0, 0, 0], sizes = [%11, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>{%11} -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>
    %15 = tensor.empty(%11, %12) : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>
    %16 = linalg.fill ins(%arg2 : f32) outs(%15 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>
    %17 = linalg.batch_matmul_transpose_b ins(%13, %14 : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>) outs(%16 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>
    %18 = iree_encoding.unset_encoding %17 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> -> tensor<?x?x8640xf32>
    %extracted_slice = tensor.extract_slice %18[0, 0, 0] [%11, %12, 8640] [1, 1, 1] : tensor<?x?x8640xf32> to tensor<?x?x8640xf32>
    flow.dispatch.tensor.store %extracted_slice, %10, offsets = [0, 0, 0], sizes = [%11, %12, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%11, %12}
    flow.return
  } count(%arg2: index, %arg3: index) -> (index, index, index) {
    %x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg2, %arg3
    flow.return %x, %y, %z : index, index, index
  }
  %7 = hal.tensor.export %6 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
  util.return %7 : !hal.buffer_view
}
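// [editor's note] CaptureDynamicDims prepends a flow.dispatch.tie_shape op for
// each dynamically shaped dispatch operand, binding it to the index operands
// that carry its dimensions so the shape is known inside the region. Reduced
// from the dump above (SSA names are illustrative):
//   %tied = flow.dispatch.tie_shape %arg2
//       : !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%arg3, %arg4}
// The subsequent flow.dispatch.tensor.load/store ops then go through %tied
// rather than the raw block argument.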
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
  %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
  %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %4 = flow.dispatch.workgroups[%0](%3, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0} =
      (%arg2: !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>>, %arg3: index, %arg4: !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>) {
    %8 = flow.dispatch.workload.ordinal %arg3, 0 : index
    %9 = flow.dispatch.tie_shape %arg4 : !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>{%8}
    %10 = flow.dispatch.tensor.load %arg2, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16>
    %11 = tensor.empty(%8) : tensor<?x8640x3200xf16>
    %12 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%10 : tensor<8640x3200xf16>) outs(%11 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %13 = iree_encoding.set_encoding %12 : tensor<?x8640x3200xf16> -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>
    flow.dispatch.tensor.store %13, %9, offsets = [0, 0, 0], sizes = [%8, 8640, 3200], strides = [1, 1, 1] : tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>{%8}
    flow.return
  } count(%arg2: index) -> (index, index, index) {
    %x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg2
    flow.return %x, %y, %z : index, index, index
  }
  %5 = flow.dispatch.workgroups[%0, %1](%2, %0, %1) : (tensor<?x?x3200xf32>{%0, %1}, index, index) -> %2 as tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0, %1} =
      (%arg2: !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>, %arg3: index, %arg4: index) {
    %8 = flow.dispatch.workload.ordinal %arg3, 0 : index
    %9 = flow.dispatch.workload.ordinal %arg4, 1 : index
    %10 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%8, %9}
    %11 = flow.dispatch.tensor.load %10, offsets = [0, 0, 0], sizes = [%8, %9, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%8, %9} -> tensor<?x?x3200xf32>
    %12 = iree_encoding.set_encoding %11 : tensor<?x?x3200xf32> -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>
    flow.dispatch.tensor.store %12, %10, offsets = [0, 0, 0], sizes = [%8, %9, 3200], strides = [1, 1, 1] : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%8, %9}
    flow.return
  } count(%arg2: index, %arg3: index) -> (index, index, index) {
    %x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg2, %arg3
    flow.return %x, %y, %z : index, index, index
  }
  %6 = flow.dispatch.workgroups[%0, %1](%cst, %5, %4, %0, %1) : (f32, tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0, %1}, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0}, index, index) -> tensor<?x?x8640xf32>{%0, %1} =
      (%arg2: f32, %arg3: !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>, %arg4: !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>, %arg5: index, %arg6: index, %arg7: !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>) {
    %8 = flow.dispatch.workload.ordinal %arg5, 0 : index
    %9 = flow.dispatch.workload.ordinal %arg6, 1 : index
    %10 = flow.dispatch.tie_shape %arg3 : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>{%8, %9}
    %11 = flow.dispatch.tie_shape %arg4 : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>{%8}
    %12 = flow.dispatch.tie_shape %arg7 : !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%8, %9}
    %13 = flow.dispatch.tensor.load %10, offsets = [0, 0, 0], sizes = [%8, %9, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>{%8, %9} -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>
    %14 = flow.dispatch.tensor.load %11, offsets = [0, 0, 0], sizes = [%8, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>{%8} -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>
    %15 = tensor.empty(%8, %9) : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>
    %16 = linalg.fill ins(%arg2 : f32) outs(%15 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>
    %17 = linalg.batch_matmul_transpose_b ins(%13, %14 : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>) outs(%16 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>
    %18 = iree_encoding.unset_encoding %17 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> -> tensor<?x?x8640xf32>
    %extracted_slice = tensor.extract_slice %18[0, 0, 0] [%8, %9, 8640] [1, 1, 1] : tensor<?x?x8640xf32> to tensor<?x?x8640xf32>
    flow.dispatch.tensor.store %extracted_slice, %12, offsets = [0, 0, 0], sizes = [%8, %9, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%8, %9}
    flow.return
  } count(%arg2: index, %arg3: index) -> (index, index, index) {
    %x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg2, %arg3
    flow.return %x, %y, %z : index, index, index
  }
  %7 = hal.tensor.export %6 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
  util.return %7 : !hal.buffer_view
}
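// [editor's note] Canonicalization here is essentially a reordering: the
// flow.dispatch.workload.ordinal ops are hoisted to the top of each region,
// and the tie_shape ops now consume their results (e.g. {%8, %9}) instead of
// the raw index block arguments (e.g. {%arg3, %arg4}) used in the previous dump.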
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} | |
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%4 = flow.dispatch.workgroups[%0](%3, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0} = | |
(%arg2: !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>>, %arg3: index, %arg4: !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>) { | |
%8 = flow.dispatch.workload.ordinal %arg3, 0 : index | |
%9 = flow.dispatch.tie_shape %arg4 : !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>{%8} | |
%10 = flow.dispatch.tensor.load %arg2, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%11 = tensor.empty(%8) : tensor<?x8640x3200xf16> | |
%12 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%10 : tensor<8640x3200xf16>) outs(%11 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%13 = iree_encoding.set_encoding %12 : tensor<?x8640x3200xf16> -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> | |
flow.dispatch.tensor.store %13, %9, offsets = [0, 0, 0], sizes = [%8, 8640, 3200], strides = [1, 1, 1] : tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>{%8} | |
flow.return | |
} count(%arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg2 | |
flow.return %x, %y, %z : index, index, index | |
} | |
%5 = flow.dispatch.workgroups[%0, %1](%2, %0, %1) : (tensor<?x?x3200xf32>{%0, %1}, index, index) -> %2 as tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0, %1} = | |
(%arg2: !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>, %arg3: index, %arg4: index) { | |
%8 = flow.dispatch.workload.ordinal %arg3, 0 : index | |
%9 = flow.dispatch.workload.ordinal %arg4, 1 : index | |
%10 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%8, %9} | |
%11 = flow.dispatch.tensor.load %10, offsets = [0, 0, 0], sizes = [%8, %9, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%8, %9} -> tensor<?x?x3200xf32> | |
%12 = iree_encoding.set_encoding %11 : tensor<?x?x3200xf32> -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> | |
flow.dispatch.tensor.store %12, %10, offsets = [0, 0, 0], sizes = [%8, %9, 3200], strides = [1, 1, 1] : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%8, %9} | |
flow.return | |
} count(%arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg2, %arg3 | |
flow.return %x, %y, %z : index, index, index | |
} | |
%6 = flow.dispatch.workgroups[%0, %1](%cst, %5, %4, %0, %1) : (f32, tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0, %1}, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0}, index, index) -> tensor<?x?x8640xf32>{%0, %1} = | |
(%arg2: f32, %arg3: !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>, %arg4: !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>, %arg5: index, %arg6: index, %arg7: !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>) { | |
%8 = flow.dispatch.workload.ordinal %arg5, 0 : index | |
%9 = flow.dispatch.workload.ordinal %arg6, 1 : index | |
%10 = flow.dispatch.tie_shape %arg3 : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>{%8, %9} | |
%11 = flow.dispatch.tie_shape %arg4 : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>{%8} | |
%12 = flow.dispatch.tie_shape %arg7 : !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%8, %9} | |
%13 = flow.dispatch.tensor.load %10, offsets = [0, 0, 0], sizes = [%8, %9, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>{%8, %9} -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> | |
%14 = flow.dispatch.tensor.load %11, offsets = [0, 0, 0], sizes = [%8, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>{%8} -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> | |
%15 = tensor.empty(%8, %9) : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> | |
%16 = linalg.fill ins(%arg2 : f32) outs(%15 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> | |
%17 = linalg.batch_matmul_transpose_b ins(%13, %14 : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>) outs(%16 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> | |
%18 = iree_encoding.unset_encoding %17 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> -> tensor<?x?x8640xf32> | |
%extracted_slice = tensor.extract_slice %18[0, 0, 0] [%8, %9, 8640] [1, 1, 1] : tensor<?x?x8640xf32> to tensor<?x?x8640xf32> | |
flow.dispatch.tensor.store %extracted_slice, %12, offsets = [0, 0, 0], sizes = [%8, %9, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%8, %9} | |
flow.return | |
} count(%arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg2, %arg3 | |
flow.return %x, %y, %z : index, index, index | |
} | |
%7 = hal.tensor.export %6 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view | |
util.return %7 : !hal.buffer_view | |
} | |
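// Note on the dispatch bodies above: together they reproduce the original
// @matmul_broad computation: broadcast the static f16 weights across the
// dynamic batch dimension, then run a transposed batch matmul accumulating in
// f32. The sketch below restates those semantics in NumPy for reference; the
// helper name and the placement of the f32 up-cast are assumptions for
// illustration, not IREE's implementation.

import numpy as np

def matmul_broad_ref(lhs: np.ndarray, rhs: np.ndarray) -> np.ndarray:
    # lhs: (B, M, 3200) f32, rhs: (8640, 3200) f16 -> out: (B, M, 8640) f32
    b = lhs.shape[0]
    # The linalg.generic broadcast: replicate the f16 weights along the batch dim.
    rhs_b = np.broadcast_to(rhs, (b,) + rhs.shape)
    # linalg.batch_matmul_transpose_b with f32 accumulation:
    #   out[b, m, n] = sum_k lhs[b, m, k] * rhs[b, n, k]
    return np.einsum("bmk,bnk->bmn",
                     lhs.astype(np.float32),
                     rhs_b.astype(np.float32))

out = matmul_broad_ref(np.ones((2, 4, 3200), np.float32),
                       np.ones((8640, 3200), np.float16))
assert out.shape == (2, 4, 8640)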
// -----// IR Dump After InitializeEmptyTensorsPass (iree-flow-initialize-empty-tensors) //----- // | |
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} | |
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%4 = flow.dispatch.workgroups[%0](%3, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0} = | |
(%arg2: !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>>, %arg3: index, %arg4: !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>) { | |
%8 = flow.dispatch.workload.ordinal %arg3, 0 : index | |
%9 = flow.dispatch.tie_shape %arg4 : !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>{%8} | |
%10 = flow.dispatch.tensor.load %arg2, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%11 = tensor.empty(%8) : tensor<?x8640x3200xf16> | |
%12 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%10 : tensor<8640x3200xf16>) outs(%11 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%13 = iree_encoding.set_encoding %12 : tensor<?x8640x3200xf16> -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> | |
flow.dispatch.tensor.store %13, %9, offsets = [0, 0, 0], sizes = [%8, 8640, 3200], strides = [1, 1, 1] : tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>{%8} | |
flow.return | |
} count(%arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg2 | |
flow.return %x, %y, %z : index, index, index | |
} | |
%5 = flow.dispatch.workgroups[%0, %1](%2, %0, %1) : (tensor<?x?x3200xf32>{%0, %1}, index, index) -> %2 as tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0, %1} = | |
(%arg2: !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>, %arg3: index, %arg4: index) { | |
%8 = flow.dispatch.workload.ordinal %arg3, 0 : index | |
%9 = flow.dispatch.workload.ordinal %arg4, 1 : index | |
%10 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%8, %9} | |
%11 = flow.dispatch.tensor.load %10, offsets = [0, 0, 0], sizes = [%8, %9, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%8, %9} -> tensor<?x?x3200xf32> | |
%12 = iree_encoding.set_encoding %11 : tensor<?x?x3200xf32> -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> | |
flow.dispatch.tensor.store %12, %10, offsets = [0, 0, 0], sizes = [%8, %9, 3200], strides = [1, 1, 1] : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%8, %9} | |
flow.return | |
} count(%arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg2, %arg3 | |
flow.return %x, %y, %z : index, index, index | |
} | |
%6 = flow.dispatch.workgroups[%0, %1](%cst, %5, %4, %0, %1) : (f32, tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0, %1}, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0}, index, index) -> tensor<?x?x8640xf32>{%0, %1} = | |
(%arg2: f32, %arg3: !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>, %arg4: !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>, %arg5: index, %arg6: index, %arg7: !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>) { | |
%8 = flow.dispatch.workload.ordinal %arg5, 0 : index | |
%9 = flow.dispatch.workload.ordinal %arg6, 1 : index | |
%10 = flow.dispatch.tie_shape %arg3 : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>{%8, %9} | |
%11 = flow.dispatch.tie_shape %arg4 : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>{%8} | |
%12 = flow.dispatch.tie_shape %arg7 : !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%8, %9} | |
%13 = flow.dispatch.tensor.load %10, offsets = [0, 0, 0], sizes = [%8, %9, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>{%8, %9} -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> | |
%14 = flow.dispatch.tensor.load %11, offsets = [0, 0, 0], sizes = [%8, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>{%8} -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> | |
%15 = tensor.empty(%8, %9) : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> | |
%16 = linalg.fill ins(%arg2 : f32) outs(%15 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> | |
%17 = linalg.batch_matmul_transpose_b ins(%13, %14 : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>) outs(%16 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> | |
%18 = iree_encoding.unset_encoding %17 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> -> tensor<?x?x8640xf32> | |
%extracted_slice = tensor.extract_slice %18[0, 0, 0] [%8, %9, 8640] [1, 1, 1] : tensor<?x?x8640xf32> to tensor<?x?x8640xf32> | |
flow.dispatch.tensor.store %extracted_slice, %12, offsets = [0, 0, 0], sizes = [%8, %9, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%8, %9} | |
flow.return | |
} count(%arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg2, %arg3 | |
flow.return %x, %y, %z : index, index, index | |
} | |
%7 = hal.tensor.export %6 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view | |
util.return %7 : !hal.buffer_view | |
} | |
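// Note on round_dims_to = array<i64: 32, 32, 32> in the encodings above: it
// records that each of the M, N, K dimensions may be padded up to the next
// multiple of 32 before a concrete packed layout is picked. A minimal sketch,
// assuming plain round-up padding (the actual materialized tile sizes are
// chosen per-target later in the pipeline):

def round_up(dim: int, multiple: int = 32) -> int:
    return -(-dim // multiple) * multiple  # ceiling division, then rescale

# e.g. the static N dim 8640 is already a multiple of 32; a dynamic M that
# resolves to 13 at runtime would be padded to 32:
assert round_up(8640) == 8640
assert round_up(13) == 32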
// -----// IR Dump After OutlineDispatchExternsPass (iree-flow-outline-dispatch-externs) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "broadwell", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,-avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,+invpcid,+64bit,-xsavec,-avx10.1-512,-avx512vpopcntdq,+cmov,-avx512vp2intersect,-avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,+rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-vpclmulqdq,-avx512vl,-uintr,-cf,-clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,-gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,-avx512f,-amx-bf16,-avx512bf16,-avx512vnni,-push2pop2,+cx8,-avx512bw,+sse3,-pku,-nf,+fsgsbase,-clzero,-mwaitx,-lwp,+lzcnt,-sha,-movdir64b,-ppx,-wbnoinvd,-enqcmd,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,-avx512bitalg,-rdpru,-clwb,+mmx,+sse2,+rdseed,-avx512vbmi2,-prefetchi,-rdpid,-fma4,-avx512vbmi,-shstk,-vaes,-waitpkg,-sgx,+fxsr,-avx512dq,-sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 32 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)> | |
#map1 = affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)> | |
#map2 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)> | |
#map3 = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map4 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} | |
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%4 = flow.dispatch.workgroups[%0](%3, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>{%0} = | |
(%arg2: !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>>, %arg3: index, %arg4: !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>) { | |
%8 = flow.dispatch.workload.ordinal %arg3, 0 : index | |
%9 = flow.dispatch.tie_shape %arg4 : !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%8} | |
%10 = flow.dispatch.tensor.load %arg2, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%11 = tensor.empty(%8) : tensor<?x8640x3200xf16> | |
%12 = linalg.generic {indexing_maps = [#map3, #map4], iterator_types = ["parallel", "parallel", "parallel"]} ins(%10 : tensor<8640x3200xf16>) outs(%11 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%13 = iree_encoding.set_encoding %12 : tensor<?x8640x3200xf16> -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
flow.dispatch.tensor.store %13, %9, offsets = [0, 0, 0], sizes = [%8, 8640, 3200], strides = [1, 1, 1] : tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%8} | |
flow.return | |
} count(%arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg2 | |
flow.return %x, %y, %z : index, index, index | |
} | |
%5 = flow.dispatch.workgroups[%0, %1](%2, %0, %1) : (tensor<?x?x3200xf32>{%0, %1}, index, index) -> %2 as tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>{%0, %1} = | |
(%arg2: !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>, %arg3: index, %arg4: index) { | |
%8 = flow.dispatch.workload.ordinal %arg3, 0 : index | |
%9 = flow.dispatch.workload.ordinal %arg4, 1 : index | |
%10 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%8, %9} | |
%11 = flow.dispatch.tensor.load %10, offsets = [0, 0, 0], sizes = [%8, %9, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%8, %9} -> tensor<?x?x3200xf32> | |
%12 = iree_encoding.set_encoding %11 : tensor<?x?x3200xf32> -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
flow.dispatch.tensor.store %12, %10, offsets = [0, 0, 0], sizes = [%8, %9, 3200], strides = [1, 1, 1] : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%8, %9} | |
flow.return | |
} count(%arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg2, %arg3 | |
flow.return %x, %y, %z : index, index, index | |
} | |
%6 = flow.dispatch.workgroups[%0, %1](%cst, %5, %4, %0, %1) : (f32, tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>{%0, %1}, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>{%0}, index, index) -> tensor<?x?x8640xf32>{%0, %1} = | |
(%arg2: f32, %arg3: !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>, %arg4: !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>, %arg5: index, %arg6: index, %arg7: !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>) { | |
%8 = flow.dispatch.workload.ordinal %arg5, 0 : index | |
%9 = flow.dispatch.workload.ordinal %arg6, 1 : index | |
%10 = flow.dispatch.tie_shape %arg3 : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%8, %9} | |
%11 = flow.dispatch.tie_shape %arg4 : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%8} | |
%12 = flow.dispatch.tie_shape %arg7 : !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%8, %9} | |
%13 = flow.dispatch.tensor.load %10, offsets = [0, 0, 0], sizes = [%8, %9, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%8, %9} -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%14 = flow.dispatch.tensor.load %11, offsets = [0, 0, 0], sizes = [%8, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%8} -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%15 = tensor.empty(%8, %9) : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%16 = linalg.fill ins(%arg2 : f32) outs(%15 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%17 = linalg.batch_matmul_transpose_b ins(%13, %14 : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) outs(%16 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%18 = iree_encoding.unset_encoding %17 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> tensor<?x?x8640xf32> | |
%extracted_slice = tensor.extract_slice %18[0, 0, 0] [%8, %9, 8640] [1, 1, 1] : tensor<?x?x8640xf32> to tensor<?x?x8640xf32> | |
flow.dispatch.tensor.store %extracted_slice, %12, offsets = [0, 0, 0], sizes = [%8, %9, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%8, %9} | |
flow.return | |
} count(%arg2: index, %arg3: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg2, %arg3 | |
flow.return %x, %y, %z : index, index, index | |
} | |
%7 = hal.tensor.export %6 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view | |
util.return %7 : !hal.buffer_view | |
} | |
} | |
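// Note on the count regions above: flow.dispatch.workgroup_count_from_slice is
// a placeholder that IREE resolves later from the workload operands (here the
// two dynamic dims of the LHS). A hedged sketch of one plausible resolution via
// ceil-div tiling; the tile sizes and the (x, y, z) assignment are invented for
// illustration, not taken from the compiler:

def workgroup_count(m: int, n: int, tile_m: int = 32, tile_n: int = 32):
    ceil_div = lambda a, b: -(-a // b)
    return (ceil_div(n, tile_n), ceil_div(m, tile_m), 1)  # (x, y, z)

print(workgroup_count(13, 8640))  # -> (270, 1, 1)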
// -----// IR Dump After OutlineDispatchRegionsPass (iree-flow-outline-dispatch-regions) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "broadwell", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,-avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,+invpcid,+64bit,-xsavec,-avx10.1-512,-avx512vpopcntdq,+cmov,-avx512vp2intersect,-avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,+rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-vpclmulqdq,-avx512vl,-uintr,-cf,-clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,-gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,-avx512f,-amx-bf16,-avx512bf16,-avx512vnni,-push2pop2,+cx8,-avx512bw,+sse3,-pku,-nf,+fsgsbase,-clzero,-mwaitx,-lwp,+lzcnt,-sha,-movdir64b,-ppx,-wbnoinvd,-enqcmd,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,-avx512bitalg,-rdpru,-clwb,+mmx,+sse2,+rdseed,-avx512vbmi2,-prefetchi,-rdpid,-fma4,-avx512vbmi,-shstk,-vaes,-waitpkg,-sgx,+fxsr,-avx512dq,-sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 32 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)> | |
#map1 = affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)> | |
#map2 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)> | |
#map3 = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map4 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
flow.executable private @matmul_broad_dispatch_0 { | |
flow.executable.export public @matmul_broad_dispatch_0 workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_0(%arg0: !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>>, %arg1: index, %arg2: !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>) { | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0} | |
%2 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%3 = tensor.empty(%0) : tensor<?x8640x3200xf16> | |
%4 = linalg.generic {indexing_maps = [#map3, #map4], iterator_types = ["parallel", "parallel", "parallel"]} ins(%2 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%5 = iree_encoding.set_encoding %4 : tensor<?x8640x3200xf16> -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
flow.dispatch.tensor.store %5, %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0} | |
return | |
} | |
} | |
} | |
flow.executable private @matmul_broad_dispatch_1 { | |
flow.executable.export public @matmul_broad_dispatch_1 workgroups(%arg0: index, %arg1: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_1(%arg0: !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>, %arg1: index, %arg2: index) { | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.tie_shape %arg0 : !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} | |
%3 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} -> tensor<?x?x3200xf32> | |
%4 = iree_encoding.set_encoding %3 : tensor<?x?x3200xf32> -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
flow.dispatch.tensor.store %4, %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
flow.executable private @matmul_broad_dispatch_2 { | |
flow.executable.export public @matmul_broad_dispatch_2 workgroups(%arg0: index, %arg1: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_2(%arg0: f32, %arg1: !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>, %arg2: !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>, %arg3: index, %arg4: index, %arg5: !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>) { | |
%0 = flow.dispatch.workload.ordinal %arg3, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg4, 1 : index | |
%2 = flow.dispatch.tie_shape %arg1 : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0, %1} | |
%3 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0} | |
%4 = flow.dispatch.tie_shape %arg5 : !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%0, %1} | |
%5 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0, %1} -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%6 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0} -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%7 = tensor.empty(%0, %1) : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%8 = linalg.fill ins(%arg0 : f32) outs(%7 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%9 = linalg.batch_matmul_transpose_b ins(%5, %6 : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) outs(%8 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%10 = iree_encoding.unset_encoding %9 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> tensor<?x?x8640xf32> | |
%extracted_slice = tensor.extract_slice %10[0, 0, 0] [%0, %1, 8640] [1, 1, 1] : tensor<?x?x8640xf32> to tensor<?x?x8640xf32> | |
flow.dispatch.tensor.store %extracted_slice, %4, offsets = [0, 0, 0], sizes = [%0, %1, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} | |
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%4 = flow.dispatch @matmul_broad_dispatch_0::@matmul_broad_dispatch_0[%0](%3, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>{%0} | |
%5 = flow.dispatch @matmul_broad_dispatch_1::@matmul_broad_dispatch_1[%0, %1](%2, %0, %1) : (tensor<?x?x3200xf32>{%0, %1}, index, index) -> %2 as tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>{%0, %1} | |
%6 = flow.dispatch @matmul_broad_dispatch_2::@matmul_broad_dispatch_2[%0, %1](%cst, %5, %4, %0, %1) : (f32, tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>{%0, %1}, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>{%0}, index, index) -> tensor<?x?x8640xf32>{%0, %1} | |
%7 = hal.tensor.export %6 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view | |
util.return %7 : !hal.buffer_view | |
} | |
} | |
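// After outlining, each flow.dispatch.workgroups region above has become a
// private flow.executable with one exported entry point, and the call sites
// are now flow.dispatch ops. A structural Python sketch of the resulting
// host-side call sequence (function names mirror the executables; the bodies
// are stand-ins, not IREE code):

def dispatch_0(rhs_f16, d0):             # broadcast + set_encoding of the RHS
    ...

def dispatch_1(lhs_f32, d0, d1):         # in-place set_encoding of the LHS
    ...

def dispatch_2(zero, lhs_enc, rhs_enc, d0, d1):  # fill + batch matmul
    ...

def matmul_broad(lhs_f32, rhs_f16):
    d0, d1 = lhs_f32.shape[0], lhs_f32.shape[1]
    rhs_enc = dispatch_0(rhs_f16, d0)
    lhs_enc = dispatch_1(lhs_f32, d0, d1)   # result is tied to its input buffer
    return dispatch_2(0.0, lhs_enc, rhs_enc, d0, d1)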
// -----// IR Dump After AnnotateDispatchesPass (iree-flow-annotate-dispatches) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "broadwell", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,-avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,+invpcid,+64bit,-xsavec,-avx10.1-512,-avx512vpopcntdq,+cmov,-avx512vp2intersect,-avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,+rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-vpclmulqdq,-avx512vl,-uintr,-cf,-clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,-gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,-avx512f,-amx-bf16,-avx512bf16,-avx512vnni,-push2pop2,+cx8,-avx512bw,+sse3,-pku,-nf,+fsgsbase,-clzero,-mwaitx,-lwp,+lzcnt,-sha,-movdir64b,-ppx,-wbnoinvd,-enqcmd,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,-avx512bitalg,-rdpru,-clwb,+mmx,+sse2,+rdseed,-avx512vbmi2,-prefetchi,-rdpid,-fma4,-avx512vbmi,-shstk,-vaes,-waitpkg,-sgx,+fxsr,-avx512dq,-sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 32 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)> | |
#map1 = affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)> | |
#map2 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)> | |
#map3 = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map4 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
flow.executable private @matmul_broad_dispatch_0 { | |
flow.executable.export public @matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16 workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16(%arg0: !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>>, %arg1: index, %arg2: !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>) { | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0} | |
%2 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%3 = tensor.empty(%0) : tensor<?x8640x3200xf16> | |
%4 = linalg.generic {indexing_maps = [#map3, #map4], iterator_types = ["parallel", "parallel", "parallel"]} ins(%2 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%5 = iree_encoding.set_encoding %4 : tensor<?x8640x3200xf16> -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
flow.dispatch.tensor.store %5, %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0} | |
return | |
} | |
} | |
} | |
flow.executable private @matmul_broad_dispatch_1 { | |
flow.executable.export public @matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200 workgroups(%arg0: index, %arg1: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200(%arg0: !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>, %arg1: index, %arg2: index) { | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.tie_shape %arg0 : !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} | |
%3 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} -> tensor<?x?x3200xf32> | |
%4 = iree_encoding.set_encoding %3 : tensor<?x?x3200xf32> -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
flow.dispatch.tensor.store %4, %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
flow.executable private @matmul_broad_dispatch_2 { | |
flow.executable.export public @matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32(%arg0: f32, %arg1: !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>, %arg2: !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>, %arg3: index, %arg4: index, %arg5: !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>) { | |
%0 = flow.dispatch.workload.ordinal %arg3, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg4, 1 : index | |
%2 = flow.dispatch.tie_shape %arg1 : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0, %1} | |
%3 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0} | |
%4 = flow.dispatch.tie_shape %arg5 : !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%0, %1} | |
%5 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0, %1} -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%6 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0} -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%7 = tensor.empty(%0, %1) : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%8 = linalg.fill ins(%arg0 : f32) outs(%7 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%9 = linalg.batch_matmul_transpose_b ins(%5, %6 : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) outs(%8 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%10 = iree_encoding.unset_encoding %9 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> tensor<?x?x8640xf32> | |
%extracted_slice = tensor.extract_slice %10[0, 0, 0] [%0, %1, 8640] [1, 1, 1] : tensor<?x?x8640xf32> to tensor<?x?x8640xf32> | |
flow.dispatch.tensor.store %extracted_slice, %4, offsets = [0, 0, 0], sizes = [%0, %1, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} | |
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%4 = flow.dispatch @matmul_broad_dispatch_0::@matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16[%0](%3, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>{%0} | |
%5 = flow.dispatch @matmul_broad_dispatch_1::@matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200[%0, %1](%2, %0, %1) : (tensor<?x?x3200xf32>{%0, %1}, index, index) -> %2 as tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>{%0, %1} | |
%6 = flow.dispatch @matmul_broad_dispatch_2::@matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32[%0, %1](%cst, %5, %4, %0, %1) : (f32, tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>{%0, %1}, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>{%0}, index, index) -> tensor<?x?x8640xf32>{%0, %1} | |
%7 = hal.tensor.export %6 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view | |
util.return %7 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After StripDebugOps (iree-util-strip-debug-ops) //----- // | |
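// NOTE: iree-util-strip-debug-ops removes debug-only ops (traces, asserts); this executable appears to contain none, so the body below is dumped unchanged. dispatch_1 loads the dynamic LHS, attaches the operand_index = 0 matmul encoding, and writes it back through the same readwrite binding. | |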
flow.executable private @matmul_broad_dispatch_1 { | |
flow.executable.export public @matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200 workgroups(%arg0: index, %arg1: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200(%arg0: !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>, %arg1: index, %arg2: index) { | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.tie_shape %arg0 : !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} | |
%3 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} -> tensor<?x?x3200xf32> | |
%4 = iree_encoding.set_encoding %3 : tensor<?x?x3200xf32> -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> | |
flow.dispatch.tensor.store %4, %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
// -----// IR Dump After StripDebugOps (iree-util-strip-debug-ops) //----- // | |
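// NOTE: the pass runs per executable, which is why the same StripDebugOps header repeats for each dispatch. dispatch_0 broadcasts the static 8640x3200 f16 RHS across the dynamic batch dimension, then sets the operand_index = 1 encoding before storing. | |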
flow.executable private @matmul_broad_dispatch_0 { | |
flow.executable.export public @matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16 workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16(%arg0: !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>>, %arg1: index, %arg2: !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>) { | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>{%0} | |
%2 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%3 = tensor.empty(%0) : tensor<?x8640x3200xf16> | |
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%2 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%5 = iree_encoding.set_encoding %4 : tensor<?x8640x3200xf16> -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> | |
flow.dispatch.tensor.store %5, %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>{%0} | |
return | |
} | |
} | |
} | |
// -----// IR Dump After StripDebugOps (iree-util-strip-debug-ops) //----- // | |
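// NOTE: dispatch_2 consumes both encoded operands, fills an encoded f32 accumulator with the scalar %arg0 via linalg.fill, runs linalg.batch_matmul_transpose_b, drops the encoding with iree_encoding.unset_encoding, and stores the ?x?x8640 slice. | |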
flow.executable private @matmul_broad_dispatch_2 { | |
flow.executable.export public @matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32(%arg0: f32, %arg1: !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>, %arg2: !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>, %arg3: index, %arg4: index, %arg5: !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>) { | |
%0 = flow.dispatch.workload.ordinal %arg3, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg4, 1 : index | |
%2 = flow.dispatch.tie_shape %arg1 : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>{%0, %1} | |
%3 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>{%0} | |
%4 = flow.dispatch.tie_shape %arg5 : !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%0, %1} | |
%5 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>{%0, %1} -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> | |
%6 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>{%0} -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> | |
%7 = tensor.empty(%0, %1) : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> | |
%8 = linalg.fill ins(%arg0 : f32) outs(%7 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> | |
%9 = linalg.batch_matmul_transpose_b ins(%5, %6 : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>) outs(%8 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> | |
%10 = iree_encoding.unset_encoding %9 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> -> tensor<?x?x8640xf32> | |
%extracted_slice = tensor.extract_slice %10[0, 0, 0] [%0, %1, 8640] [1, 1, 1] : tensor<?x?x8640xf32> to tensor<?x?x8640xf32> | |
flow.dispatch.tensor.store %extracted_slice, %4, offsets = [0, 0, 0], sizes = [%0, %1, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- // | |
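// NOTE: iree-flow-canonicalize applies canonicalization and folding patterns; nothing folds in this entry function, so the function-scoped dump below matches the body printed in the previous module dump. | |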
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} | |
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%4 = flow.dispatch @matmul_broad_dispatch_0::@matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16[%0](%3, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0} | |
%5 = flow.dispatch @matmul_broad_dispatch_1::@matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200[%0, %1](%2, %0, %1) : (tensor<?x?x3200xf32>{%0, %1}, index, index) -> %2 as tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0, %1} | |
%6 = flow.dispatch @matmul_broad_dispatch_2::@matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32[%0, %1](%cst, %5, %4, %0, %1) : (f32, tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0, %1}, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0}, index, index) -> tensor<?x?x8640xf32>{%0, %1} | |
%7 = hal.tensor.export %6 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view | |
util.return %7 : !hal.buffer_view | |
} | |
// -----// IR Dump After DeduplicateExecutablesPass (iree-flow-deduplicate-executables) //----- // | |
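// NOTE: iree-flow-deduplicate-executables merges structurally identical executables; the three dispatches here differ, so all of them survive. This is a module-scoped dump, so the #executable_target, #map, and #device_target_local aliases are printed again. | |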
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "broadwell", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,-avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,+invpcid,+64bit,-xsavec,-avx10.1-512,-avx512vpopcntdq,+cmov,-avx512vp2intersect,-avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,+rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-vpclmulqdq,-avx512vl,-uintr,-cf,-clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,-gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,-avx512f,-amx-bf16,-avx512bf16,-avx512vnni,-push2pop2,+cx8,-avx512bw,+sse3,-pku,-nf,+fsgsbase,-clzero,-mwaitx,-lwp,+lzcnt,-sha,-movdir64b,-ppx,-wbnoinvd,-enqcmd,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,-avx512bitalg,-rdpru,-clwb,+mmx,+sse2,+rdseed,-avx512vbmi2,-prefetchi,-rdpid,-fma4,-avx512vbmi,-shstk,-vaes,-waitpkg,-sgx,+fxsr,-avx512dq,-sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 32 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)> | |
#map1 = affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)> | |
#map2 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)> | |
#map3 = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map4 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
flow.executable private @matmul_broad_dispatch_0 { | |
flow.executable.export public @matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16 workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16(%arg0: !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>>, %arg1: index, %arg2: !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>) { | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0} | |
%2 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%3 = tensor.empty(%0) : tensor<?x8640x3200xf16> | |
%4 = linalg.generic {indexing_maps = [#map3, #map4], iterator_types = ["parallel", "parallel", "parallel"]} ins(%2 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%5 = iree_encoding.set_encoding %4 : tensor<?x8640x3200xf16> -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
flow.dispatch.tensor.store %5, %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0} | |
return | |
} | |
} | |
} | |
flow.executable private @matmul_broad_dispatch_1 { | |
flow.executable.export public @matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200 workgroups(%arg0: index, %arg1: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200(%arg0: !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>, %arg1: index, %arg2: index) { | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.tie_shape %arg0 : !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} | |
%3 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} -> tensor<?x?x3200xf32> | |
%4 = iree_encoding.set_encoding %3 : tensor<?x?x3200xf32> -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
flow.dispatch.tensor.store %4, %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
flow.executable private @matmul_broad_dispatch_2 { | |
flow.executable.export public @matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32(%arg0: f32, %arg1: !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>, %arg2: !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>, %arg3: index, %arg4: index, %arg5: !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>) { | |
%0 = flow.dispatch.workload.ordinal %arg3, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg4, 1 : index | |
%2 = flow.dispatch.tie_shape %arg1 : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0, %1} | |
%3 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0} | |
%4 = flow.dispatch.tie_shape %arg5 : !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%0, %1} | |
%5 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0, %1} -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%6 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0} -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%7 = tensor.empty(%0, %1) : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%8 = linalg.fill ins(%arg0 : f32) outs(%7 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%9 = linalg.batch_matmul_transpose_b ins(%5, %6 : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) outs(%8 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%10 = iree_encoding.unset_encoding %9 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> tensor<?x?x8640xf32> | |
%extracted_slice = tensor.extract_slice %10[0, 0, 0] [%0, %1, 8640] [1, 1, 1] : tensor<?x?x8640xf32> to tensor<?x?x8640xf32> | |
flow.dispatch.tensor.store %extracted_slice, %4, offsets = [0, 0, 0], sizes = [%0, %1, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} | |
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%4 = flow.dispatch @matmul_broad_dispatch_0::@matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16[%0](%3, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>{%0} | |
%5 = flow.dispatch @matmul_broad_dispatch_1::@matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200[%0, %1](%2, %0, %1) : (tensor<?x?x3200xf32>{%0, %1}, index, index) -> %2 as tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>{%0, %1} | |
%6 = flow.dispatch @matmul_broad_dispatch_2::@matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32[%0, %1](%cst, %5, %4, %0, %1) : (f32, tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>{%0, %1}, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>{%0}, index, index) -> tensor<?x?x8640xf32>{%0, %1} | |
%7 = hal.tensor.export %6 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view | |
util.return %7 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After InjectTensorTracingPass (iree-flow-inject-tensor-tracing) //----- // | |
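// NOTE: iree-flow-inject-tensor-tracing would insert tensor tracing ops (flow.tensor.trace) for traced values; tracing is not enabled here, so the function is unchanged. In a function-scoped dump there is no alias context, so the encodings print their affine_maps inline. | |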
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} | |
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%4 = flow.dispatch @matmul_broad_dispatch_0::@matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16[%0](%3, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0} | |
%5 = flow.dispatch @matmul_broad_dispatch_1::@matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200[%0, %1](%2, %0, %1) : (tensor<?x?x3200xf32>{%0, %1}, index, index) -> %2 as tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0, %1} | |
%6 = flow.dispatch @matmul_broad_dispatch_2::@matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32[%0, %1](%cst, %5, %4, %0, %1) : (f32, tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0, %1}, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0}, index, index) -> tensor<?x?x8640xf32>{%0, %1} | |
%7 = hal.tensor.export %6 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view | |
util.return %7 : !hal.buffer_view | |
} | |
// -----// IR Dump After CleanupTensorShapesPass (iree-flow-cleanup-tensor-shapes) //----- // | |
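// NOTE: iree-flow-cleanup-tensor-shapes is expected to strip leftover tensor shape-query ops (e.g. tensor.dim) once shapes are carried explicitly alongside values; none remain in this function, so the dump matches the previous one. | |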
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} | |
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%4 = flow.dispatch @matmul_broad_dispatch_0::@matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16[%0](%3, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0} | |
%5 = flow.dispatch @matmul_broad_dispatch_1::@matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200[%0, %1](%2, %0, %1) : (tensor<?x?x3200xf32>{%0, %1}, index, index) -> %2 as tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0, %1} | |
%6 = flow.dispatch @matmul_broad_dispatch_2::@matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32[%0, %1](%cst, %5, %4, %0, %1) : (f32, tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0, %1}, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0}, index, index) -> tensor<?x?x8640xf32>{%0, %1} | |
%7 = hal.tensor.export %6 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view | |
util.return %7 : !hal.buffer_view | |
} | |
// -----// IR Dump After OutlineConstantsPass (iree-flow-outline-constants) //----- // | |
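// NOTE: iree-flow-outline-constants hoists inline tensor constants into util.global initializers; the only constant here is the scalar f32 zero, which stays inline. The module now also carries iree.fixedpoint.iteration = 0, apparently recording the first iteration of the fixed-point pass driver. | |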
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "broadwell", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,-avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,+invpcid,+64bit,-xsavec,-avx10.1-512,-avx512vpopcntdq,+cmov,-avx512vp2intersect,-avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,+rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-vpclmulqdq,-avx512vl,-uintr,-cf,-clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,-gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,-avx512f,-amx-bf16,-avx512bf16,-avx512vnni,-push2pop2,+cx8,-avx512bw,+sse3,-pku,-nf,+fsgsbase,-clzero,-mwaitx,-lwp,+lzcnt,-sha,-movdir64b,-ppx,-wbnoinvd,-enqcmd,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,-avx512bitalg,-rdpru,-clwb,+mmx,+sse2,+rdseed,-avx512vbmi2,-prefetchi,-rdpid,-fma4,-avx512vbmi,-shstk,-vaes,-waitpkg,-sgx,+fxsr,-avx512dq,-sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 32 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)> | |
#map1 = affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)> | |
#map2 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)> | |
#map3 = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map4 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device | |
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
flow.executable private @matmul_broad_dispatch_0 { | |
flow.executable.export public @matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16 workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16(%arg0: !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>>, %arg1: index, %arg2: !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>) { | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0} | |
%2 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%3 = tensor.empty(%0) : tensor<?x8640x3200xf16> | |
%4 = linalg.generic {indexing_maps = [#map3, #map4], iterator_types = ["parallel", "parallel", "parallel"]} ins(%2 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%5 = iree_encoding.set_encoding %4 : tensor<?x8640x3200xf16> -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
flow.dispatch.tensor.store %5, %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0} | |
return | |
} | |
} | |
} | |
flow.executable private @matmul_broad_dispatch_1 { | |
flow.executable.export public @matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200 workgroups(%arg0: index, %arg1: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200(%arg0: !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>, %arg1: index, %arg2: index) { | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.tie_shape %arg0 : !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} | |
%3 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} -> tensor<?x?x3200xf32> | |
%4 = iree_encoding.set_encoding %3 : tensor<?x?x3200xf32> -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
flow.dispatch.tensor.store %4, %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
flow.executable private @matmul_broad_dispatch_2 { | |
flow.executable.export public @matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32(%arg0: f32, %arg1: !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>, %arg2: !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>, %arg3: index, %arg4: index, %arg5: !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>) { | |
%0 = flow.dispatch.workload.ordinal %arg3, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg4, 1 : index | |
%2 = flow.dispatch.tie_shape %arg1 : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0, %1} | |
%3 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0} | |
%4 = flow.dispatch.tie_shape %arg5 : !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%0, %1} | |
%5 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0, %1} -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%6 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0} -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%7 = tensor.empty(%0, %1) : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%8 = linalg.fill ins(%arg0 : f32) outs(%7 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%9 = linalg.batch_matmul_transpose_b ins(%5, %6 : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) outs(%8 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%10 = iree_encoding.unset_encoding %9 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> tensor<?x?x8640xf32> | |
%extracted_slice = tensor.extract_slice %10[0, 0, 0] [%0, %1, 8640] [1, 1, 1] : tensor<?x?x8640xf32> to tensor<?x?x8640xf32> | |
flow.dispatch.tensor.store %extracted_slice, %4, offsets = [0, 0, 0], sizes = [%0, %1, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} | |
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%4 = flow.dispatch @matmul_broad_dispatch_0::@matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16[%0](%3, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>{%0} | |
%5 = flow.dispatch @matmul_broad_dispatch_1::@matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200[%0, %1](%2, %0, %1) : (tensor<?x?x3200xf32>{%0, %1}, index, index) -> %2 as tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>{%0, %1} | |
%6 = flow.dispatch @matmul_broad_dispatch_2::@matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32[%0, %1](%cst, %5, %4, %0, %1) : (f32, tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>{%0, %1}, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>{%0}, index, index) -> tensor<?x?x8640xf32>{%0, %1} | |
%7 = hal.tensor.export %6 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view | |
util.return %7 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- // | |
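// NOTE: a second flow-level canonicalization; the entry function is already canonical, so the body below is identical to the one in the OutlineConstants dump, modulo aliased vs. inline affine_map printing. | |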
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} | |
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%4 = flow.dispatch @matmul_broad_dispatch_0::@matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16[%0](%3, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0} | |
%5 = flow.dispatch @matmul_broad_dispatch_1::@matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200[%0, %1](%2, %0, %1) : (tensor<?x?x3200xf32>{%0, %1}, index, index) -> %2 as tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0, %1} | |
%6 = flow.dispatch @matmul_broad_dispatch_2::@matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32[%0, %1](%cst, %5, %4, %0, %1) : (f32, tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0, %1}, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0}, index, index) -> tensor<?x?x8640xf32>{%0, %1} | |
%7 = hal.tensor.export %6 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view | |
util.return %7 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
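// NOTE: cse eliminates redundant computations; each hal.buffer_view.dim, hal.tensor.import, and flow.dispatch here is unique, so nothing is removed. | |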
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} | |
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%4 = flow.dispatch @matmul_broad_dispatch_0::@matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16[%0](%3, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0} | |
%5 = flow.dispatch @matmul_broad_dispatch_1::@matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200[%0, %1](%2, %0, %1) : (tensor<?x?x3200xf32>{%0, %1}, index, index) -> %2 as tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0, %1} | |
%6 = flow.dispatch @matmul_broad_dispatch_2::@matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32[%0, %1](%cst, %5, %4, %0, %1) : (f32, tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0, %1}, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0}, index, index) -> tensor<?x?x8640xf32>{%0, %1} | |
%7 = hal.tensor.export %6 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view | |
util.return %7 : !hal.buffer_view | |
} | |
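// On the #iree_encoding.encoding attribute seen throughout: operand_index
// identifies the operand's role in the matmul (0 = LHS, 1 = RHS, 2 = result),
// element_types lists the LHS/RHS/result element types, and user_indexing_maps
// records the batch_matmul_transpose_b access pattern. round_dims_to =
// array<i64: 32, 32, 32> suggests each dimension may be padded up to a
// multiple of 32 when the encoding is later materialized into a tiled layout.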
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
// (function IR identical to the CSE dump above; duplicate body elided)
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "broadwell", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,-avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,+invpcid,+64bit,-xsavec,-avx10.1-512,-avx512vpopcntdq,+cmov,-avx512vp2intersect,-avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,+rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-vpclmulqdq,-avx512vl,-uintr,-cf,-clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,-gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,-avx512f,-amx-bf16,-avx512bf16,-avx512vnni,-push2pop2,+cx8,-avx512bw,+sse3,-pku,-nf,+fsgsbase,-clzero,-mwaitx,-lwp,+lzcnt,-sha,-movdir64b,-ppx,-wbnoinvd,-enqcmd,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,-avx512bitalg,-rdpru,-clwb,+mmx,+sse2,+rdseed,-avx512vbmi2,-prefetchi,-rdpid,-fma4,-avx512vbmi,-shstk,-vaes,-waitpkg,-sgx,+fxsr,-avx512dq,-sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 32 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
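// Target note: the "broadwell" feature set enables AVX2/FMA but no AVX-512,
// consistent with native_vector_size = 32 (bytes, i.e. 256-bit vectors); the
// module is compiled as an embedded ELF for bare x86_64.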
#map = affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>
#map1 = affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>
#map2 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>
#map3 = affine_map<(d0, d1, d2) -> (d1, d2)>
#map4 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_local
  flow.executable private @matmul_broad_dispatch_0 {
    flow.executable.export public @matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16 workgroups(%arg0: index) -> (index, index, index) {
      %x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
      flow.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16(%arg0: !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>>, %arg1: index, %arg2: !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>) {
        %0 = flow.dispatch.workload.ordinal %arg1, 0 : index
        %1 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0}
        %2 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16>
        %3 = tensor.empty(%0) : tensor<?x8640x3200xf16>
        %4 = linalg.generic {indexing_maps = [#map3, #map4], iterator_types = ["parallel", "parallel", "parallel"]} ins(%2 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) {
        ^bb0(%in: f16, %out: f16):
          linalg.yield %in : f16
        } -> tensor<?x8640x3200xf16>
        %5 = iree_encoding.set_encoding %4 : tensor<?x8640x3200xf16> -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>
        flow.dispatch.tensor.store %5, %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0}
        return
      }
    }
  }
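  // dispatch_0 above: the 8640x3200 f16 weight is loaded once, broadcast into
  // a ?x8640x3200 buffer, and stored already carrying the RHS encoding, so the
  // broadcast and the layout change share a single dispatch.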
  flow.executable private @matmul_broad_dispatch_1 {
    flow.executable.export public @matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
      %x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
      flow.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200(%arg0: !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>, %arg1: index, %arg2: index) {
        %0 = flow.dispatch.workload.ordinal %arg1, 0 : index
        %1 = flow.dispatch.workload.ordinal %arg2, 1 : index
        %2 = flow.dispatch.tie_shape %arg0 : !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1}
        %3 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} -> tensor<?x?x3200xf32>
        %4 = iree_encoding.set_encoding %3 : tensor<?x?x3200xf32> -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>
        flow.dispatch.tensor.store %4, %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1}
        return
      }
    }
  }
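  // dispatch_1 above: the LHS binding is readwrite and the encoded tensor is
  // stored back over the same !flow.dispatch.tensor it was loaded from, i.e.
  // the re-encoding happens in place rather than into a fresh result buffer
  // (the caller ties this with "-> %2 as tensor<...>").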
  flow.executable private @matmul_broad_dispatch_2 {
    flow.executable.export public @matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
      %x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
      flow.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32(%arg0: f32, %arg1: !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>, %arg2: !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>, %arg3: index, %arg4: index, %arg5: !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>) {
        %0 = flow.dispatch.workload.ordinal %arg3, 0 : index
        %1 = flow.dispatch.workload.ordinal %arg4, 1 : index
        %2 = flow.dispatch.tie_shape %arg1 : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0, %1}
        %3 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0}
        %4 = flow.dispatch.tie_shape %arg5 : !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%0, %1}
        %5 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0, %1} -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>
        %6 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0} -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>
        %7 = tensor.empty(%0, %1) : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>
        %8 = linalg.fill ins(%arg0 : f32) outs(%7 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>
        %9 = linalg.batch_matmul_transpose_b ins(%5, %6 : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) outs(%8 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>
        %10 = iree_encoding.unset_encoding %9 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> tensor<?x?x8640xf32>
        %extracted_slice = tensor.extract_slice %10[0, 0, 0] [%0, %1, 8640] [1, 1, 1] : tensor<?x?x8640xf32> to tensor<?x?x8640xf32>
        flow.dispatch.tensor.store %extracted_slice, %4, offsets = [0, 0, 0], sizes = [%0, %1, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%0, %1}
        return
      }
    }
  }
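  // dispatch_2 above: fill and batch_matmul_transpose_b run entirely on
  // encoded tensors; unset_encoding followed by tensor.extract_slice to
  // [%0, %1, 8640] drops back to the unpadded logical shape before the store,
  // trimming whatever padding round_dims_to introduces once the encoding is
  // materialized.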
  util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
    %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
    %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
    %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
    %4 = flow.dispatch @matmul_broad_dispatch_0::@matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16[%0](%3, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>{%0}
    %5 = flow.dispatch @matmul_broad_dispatch_1::@matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200[%0, %1](%2, %0, %1) : (tensor<?x?x3200xf32>{%0, %1}, index, index) -> %2 as tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>{%0, %1}
    %6 = flow.dispatch @matmul_broad_dispatch_2::@matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32[%0, %1](%cst, %5, %4, %0, %1) : (f32, tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>{%0, %1}, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>{%0}, index, index) -> tensor<?x?x8640xf32>{%0, %1}
    %7 = hal.tensor.export %6 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
    util.return %7 : !hal.buffer_view
  }
}
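// The util passes that follow (FoldGlobals, FuseGlobals, IPO) work on globals
// and the call graph; with a single public function and one device global
// there is nothing for them to rewrite here, so the module they print is
// byte-for-byte the same as the ApplyPatterns dump above.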
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- //
// (module IR identical to the ApplyPatterns dump above; duplicate elided)
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- //
// (module IR identical to the ApplyPatterns dump above; duplicate elided)
// -----// IR Dump After IPO (iree-util-ipo) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "broadwell", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,-avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,+invpcid,+64bit,-xsavec,-avx10.1-512,-avx512vpopcntdq,+cmov,-avx512vp2intersect,-avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,+rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-vpclmulqdq,-avx512vl,-uintr,-cf,-clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,-gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,-avx512f,-amx-bf16,-avx512bf16,-avx512vnni,-push2pop2,+cx8,-avx512bw,+sse3,-pku,-nf,+fsgsbase,-clzero,-mwaitx,-lwp,+lzcnt,-sha,-movdir64b,-ppx,-wbnoinvd,-enqcmd,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,-avx512bitalg,-rdpru,-clwb,+mmx,+sse2,+rdseed,-avx512vbmi2,-prefetchi,-rdpid,-fma4,-avx512vbmi,-shstk,-vaes,-waitpkg,-sgx,+fxsr,-avx512dq,-sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 32 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)> | |
#map1 = affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)> | |
#map2 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)> | |
#map3 = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map4 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device | |
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
flow.executable private @matmul_broad_dispatch_0 { | |
flow.executable.export public @matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16 workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16(%arg0: !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>>, %arg1: index, %arg2: !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>) { | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0} | |
%2 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%3 = tensor.empty(%0) : tensor<?x8640x3200xf16> | |
%4 = linalg.generic {indexing_maps = [#map3, #map4], iterator_types = ["parallel", "parallel", "parallel"]} ins(%2 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%5 = iree_encoding.set_encoding %4 : tensor<?x8640x3200xf16> -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
flow.dispatch.tensor.store %5, %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0} | |
return | |
} | |
} | |
} | |
flow.executable private @matmul_broad_dispatch_1 { | |
flow.executable.export public @matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200 workgroups(%arg0: index, %arg1: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200(%arg0: !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>, %arg1: index, %arg2: index) { | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.tie_shape %arg0 : !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} | |
%3 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} -> tensor<?x?x3200xf32> | |
%4 = iree_encoding.set_encoding %3 : tensor<?x?x3200xf32> -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
flow.dispatch.tensor.store %4, %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
flow.executable private @matmul_broad_dispatch_2 { | |
flow.executable.export public @matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32(%arg0: f32, %arg1: !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>, %arg2: !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>, %arg3: index, %arg4: index, %arg5: !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>) { | |
%0 = flow.dispatch.workload.ordinal %arg3, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg4, 1 : index | |
%2 = flow.dispatch.tie_shape %arg1 : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0, %1} | |
%3 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0} | |
%4 = flow.dispatch.tie_shape %arg5 : !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%0, %1} | |
%5 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0, %1} -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%6 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0} -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%7 = tensor.empty(%0, %1) : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%8 = linalg.fill ins(%arg0 : f32) outs(%7 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%9 = linalg.batch_matmul_transpose_b ins(%5, %6 : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) outs(%8 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%10 = iree_encoding.unset_encoding %9 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> tensor<?x?x8640xf32> | |
%extracted_slice = tensor.extract_slice %10[0, 0, 0] [%0, %1, 8640] [1, 1, 1] : tensor<?x?x8640xf32> to tensor<?x?x8640xf32> | |
flow.dispatch.tensor.store %extracted_slice, %4, offsets = [0, 0, 0], sizes = [%0, %1, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
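// The public entry point below wires up the ABI: it imports the two !hal.buffer_view arguments, launches the three dispatches in order, and exports the ?x?x8640 f32 result. | |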
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} | |
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%4 = flow.dispatch @matmul_broad_dispatch_0::@matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16[%0](%3, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>{%0} | |
%5 = flow.dispatch @matmul_broad_dispatch_1::@matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200[%0, %1](%2, %0, %1) : (tensor<?x?x3200xf32>{%0, %1}, index, index) -> %2 as tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>{%0, %1} | |
%6 = flow.dispatch @matmul_broad_dispatch_2::@matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32[%0, %1](%cst, %5, %4, %0, %1) : (f32, tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>{%0, %1}, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>{%0}, index, index) -> tensor<?x?x8640xf32>{%0, %1} | |
%7 = hal.tensor.export %6 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view | |
util.return %7 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After FixedPointIterator (iree-util-fixed-point-iterator) //----- // | |
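// Fixed-point iteration has converged on a module with three dispatches: dispatch_0 (RHS broadcast plus encoding), dispatch_1 (in-place LHS encoding), and dispatch_2 (the encoded batch_matmul_transpose_b). The per-dispatch annotations below apply to every repetition of this module in later dumps. | |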
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "broadwell", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,-avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,+invpcid,+64bit,-xsavec,-avx10.1-512,-avx512vpopcntdq,+cmov,-avx512vp2intersect,-avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,+rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-vpclmulqdq,-avx512vl,-uintr,-cf,-clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,-gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,-avx512f,-amx-bf16,-avx512bf16,-avx512vnni,-push2pop2,+cx8,-avx512bw,+sse3,-pku,-nf,+fsgsbase,-clzero,-mwaitx,-lwp,+lzcnt,-sha,-movdir64b,-ppx,-wbnoinvd,-enqcmd,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,-avx512bitalg,-rdpru,-clwb,+mmx,+sse2,+rdseed,-avx512vbmi2,-prefetchi,-rdpid,-fma4,-avx512vbmi,-shstk,-vaes,-waitpkg,-sgx,+fxsr,-avx512dq,-sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 32 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)> | |
#map1 = affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)> | |
#map2 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)> | |
#map3 = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map4 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
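  // dispatch_0: loads the static 8640x3200 f16 RHS, broadcasts it across the dynamic batch dimension with a rank-expanding linalg.generic, then tags the result with the operand_index = 1 matmul encoding. | |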
flow.executable private @matmul_broad_dispatch_0 { | |
flow.executable.export public @matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16 workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16(%arg0: !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>>, %arg1: index, %arg2: !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>) { | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0} | |
%2 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%3 = tensor.empty(%0) : tensor<?x8640x3200xf16> | |
%4 = linalg.generic {indexing_maps = [#map3, #map4], iterator_types = ["parallel", "parallel", "parallel"]} ins(%2 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%5 = iree_encoding.set_encoding %4 : tensor<?x8640x3200xf16> -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
flow.dispatch.tensor.store %5, %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0} | |
return | |
} | |
} | |
} | |
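  // dispatch_1: re-encodes the ?x?x3200 f32 LHS in place; the readwrite binding lets set_encoding write back over the imported tensor (note the "-> %2 as ..." tie at the dispatch site). | |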
flow.executable private @matmul_broad_dispatch_1 { | |
flow.executable.export public @matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200 workgroups(%arg0: index, %arg1: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200(%arg0: !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>, %arg1: index, %arg2: index) { | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.tie_shape %arg0 : !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} | |
%3 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} -> tensor<?x?x3200xf32> | |
%4 = iree_encoding.set_encoding %3 : tensor<?x?x3200xf32> -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
flow.dispatch.tensor.store %4, %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
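  // dispatch_2: fills the encoded ?x?x8640 f32 accumulator with the scalar operand, runs the encoded batch_matmul_transpose_b (f32 LHS, f16 RHS, f32 accumulator), then unsets the encoding and stores the result slice. | |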
flow.executable private @matmul_broad_dispatch_2 { | |
flow.executable.export public @matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32(%arg0: f32, %arg1: !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>, %arg2: !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>, %arg3: index, %arg4: index, %arg5: !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>) { | |
%0 = flow.dispatch.workload.ordinal %arg3, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg4, 1 : index | |
%2 = flow.dispatch.tie_shape %arg1 : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0, %1} | |
%3 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0} | |
%4 = flow.dispatch.tie_shape %arg5 : !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%0, %1} | |
%5 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0, %1} -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%6 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0} -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%7 = tensor.empty(%0, %1) : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%8 = linalg.fill ins(%arg0 : f32) outs(%7 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%9 = linalg.batch_matmul_transpose_b ins(%5, %6 : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) outs(%8 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%10 = iree_encoding.unset_encoding %9 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> tensor<?x?x8640xf32> | |
%extracted_slice = tensor.extract_slice %10[0, 0, 0] [%0, %1, 8640] [1, 1, 1] : tensor<?x?x8640xf32> to tensor<?x?x8640xf32> | |
flow.dispatch.tensor.store %extracted_slice, %4, offsets = [0, 0, 0], sizes = [%0, %1, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} | |
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%4 = flow.dispatch @matmul_broad_dispatch_0::@matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16[%0](%3, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>{%0} | |
%5 = flow.dispatch @matmul_broad_dispatch_1::@matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200[%0, %1](%2, %0, %1) : (tensor<?x?x3200xf32>{%0, %1}, index, index) -> %2 as tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>{%0, %1} | |
%6 = flow.dispatch @matmul_broad_dispatch_2::@matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32[%0, %1](%cst, %5, %4, %0, %1) : (f32, tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>{%0, %1}, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>{%0}, index, index) -> tensor<?x?x8640xf32>{%0, %1} | |
%7 = hal.tensor.export %6 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view | |
util.return %7 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After SymbolDCE (symbol-dce) //----- // | |
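// symbol-dce removes unreferenced symbols only; all three executables are still reachable from @matmul_broad, so the module below appears identical to the previous dump. | |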
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "broadwell", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,-avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,+invpcid,+64bit,-xsavec,-avx10.1-512,-avx512vpopcntdq,+cmov,-avx512vp2intersect,-avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,+rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-vpclmulqdq,-avx512vl,-uintr,-cf,-clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,-gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,-avx512f,-amx-bf16,-avx512bf16,-avx512vnni,-push2pop2,+cx8,-avx512bw,+sse3,-pku,-nf,+fsgsbase,-clzero,-mwaitx,-lwp,+lzcnt,-sha,-movdir64b,-ppx,-wbnoinvd,-enqcmd,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,-avx512bitalg,-rdpru,-clwb,+mmx,+sse2,+rdseed,-avx512vbmi2,-prefetchi,-rdpid,-fma4,-avx512vbmi,-shstk,-vaes,-waitpkg,-sgx,+fxsr,-avx512dq,-sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 32 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)> | |
#map1 = affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)> | |
#map2 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)> | |
#map3 = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map4 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
flow.executable private @matmul_broad_dispatch_0 { | |
flow.executable.export public @matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16 workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16(%arg0: !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>>, %arg1: index, %arg2: !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>) { | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0} | |
%2 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%3 = tensor.empty(%0) : tensor<?x8640x3200xf16> | |
%4 = linalg.generic {indexing_maps = [#map3, #map4], iterator_types = ["parallel", "parallel", "parallel"]} ins(%2 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%5 = iree_encoding.set_encoding %4 : tensor<?x8640x3200xf16> -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
flow.dispatch.tensor.store %5, %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0} | |
return | |
} | |
} | |
} | |
flow.executable private @matmul_broad_dispatch_1 { | |
flow.executable.export public @matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200 workgroups(%arg0: index, %arg1: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200(%arg0: !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>, %arg1: index, %arg2: index) { | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.tie_shape %arg0 : !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} | |
%3 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} -> tensor<?x?x3200xf32> | |
%4 = iree_encoding.set_encoding %3 : tensor<?x?x3200xf32> -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
flow.dispatch.tensor.store %4, %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
flow.executable private @matmul_broad_dispatch_2 { | |
flow.executable.export public @matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32(%arg0: f32, %arg1: !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>, %arg2: !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>, %arg3: index, %arg4: index, %arg5: !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>) { | |
%0 = flow.dispatch.workload.ordinal %arg3, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg4, 1 : index | |
%2 = flow.dispatch.tie_shape %arg1 : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0, %1} | |
%3 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0} | |
%4 = flow.dispatch.tie_shape %arg5 : !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%0, %1} | |
%5 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0, %1} -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%6 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0} -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%7 = tensor.empty(%0, %1) : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%8 = linalg.fill ins(%arg0 : f32) outs(%7 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%9 = linalg.batch_matmul_transpose_b ins(%5, %6 : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) outs(%8 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%10 = iree_encoding.unset_encoding %9 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> tensor<?x?x8640xf32> | |
%extracted_slice = tensor.extract_slice %10[0, 0, 0] [%0, %1, 8640] [1, 1, 1] : tensor<?x?x8640xf32> to tensor<?x?x8640xf32> | |
flow.dispatch.tensor.store %extracted_slice, %4, offsets = [0, 0, 0], sizes = [%0, %1, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} | |
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%4 = flow.dispatch @matmul_broad_dispatch_0::@matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16[%0](%3, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>{%0} | |
%5 = flow.dispatch @matmul_broad_dispatch_1::@matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200[%0, %1](%2, %0, %1) : (tensor<?x?x3200xf32>{%0, %1}, index, index) -> %2 as tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>{%0, %1} | |
%6 = flow.dispatch @matmul_broad_dispatch_2::@matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32[%0, %1](%cst, %5, %4, %0, %1) : (f32, tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>{%0, %1}, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>{%0}, index, index) -> tensor<?x?x8640xf32>{%0, %1} | |
%7 = hal.tensor.export %6 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view | |
util.return %7 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After VerifyInputPass (iree-stream-verify-input) //----- // | |
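// iree-stream-verify-input is a verification-only gate before stream conversion; it rewrites nothing here, so this dump repeats the module unchanged. | |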
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "broadwell", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,-avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,+invpcid,+64bit,-xsavec,-avx10.1-512,-avx512vpopcntdq,+cmov,-avx512vp2intersect,-avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,+rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-vpclmulqdq,-avx512vl,-uintr,-cf,-clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,-gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,-avx512f,-amx-bf16,-avx512bf16,-avx512vnni,-push2pop2,+cx8,-avx512bw,+sse3,-pku,-nf,+fsgsbase,-clzero,-mwaitx,-lwp,+lzcnt,-sha,-movdir64b,-ppx,-wbnoinvd,-enqcmd,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,-avx512bitalg,-rdpru,-clwb,+mmx,+sse2,+rdseed,-avx512vbmi2,-prefetchi,-rdpid,-fma4,-avx512vbmi,-shstk,-vaes,-waitpkg,-sgx,+fxsr,-avx512dq,-sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 32 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)> | |
#map1 = affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)> | |
#map2 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)> | |
#map3 = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map4 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
flow.executable private @matmul_broad_dispatch_0 { | |
flow.executable.export public @matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16 workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16(%arg0: !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>>, %arg1: index, %arg2: !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>) { | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0} | |
%2 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%3 = tensor.empty(%0) : tensor<?x8640x3200xf16> | |
%4 = linalg.generic {indexing_maps = [#map3, #map4], iterator_types = ["parallel", "parallel", "parallel"]} ins(%2 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%5 = iree_encoding.set_encoding %4 : tensor<?x8640x3200xf16> -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
flow.dispatch.tensor.store %5, %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0} | |
return | |
} | |
} | |
} | |
flow.executable private @matmul_broad_dispatch_1 { | |
flow.executable.export public @matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200 workgroups(%arg0: index, %arg1: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200(%arg0: !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>, %arg1: index, %arg2: index) { | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.tie_shape %arg0 : !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} | |
%3 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} -> tensor<?x?x3200xf32> | |
%4 = iree_encoding.set_encoding %3 : tensor<?x?x3200xf32> -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
flow.dispatch.tensor.store %4, %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
flow.executable private @matmul_broad_dispatch_2 { | |
flow.executable.export public @matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32(%arg0: f32, %arg1: !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>, %arg2: !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>, %arg3: index, %arg4: index, %arg5: !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>) { | |
%0 = flow.dispatch.workload.ordinal %arg3, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg4, 1 : index | |
%2 = flow.dispatch.tie_shape %arg1 : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0, %1} | |
%3 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0} | |
%4 = flow.dispatch.tie_shape %arg5 : !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%0, %1} | |
%5 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0, %1} -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%6 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0} -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%7 = tensor.empty(%0, %1) : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%8 = linalg.fill ins(%arg0 : f32) outs(%7 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%9 = linalg.batch_matmul_transpose_b ins(%5, %6 : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) outs(%8 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%10 = iree_encoding.unset_encoding %9 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> tensor<?x?x8640xf32> | |
%extracted_slice = tensor.extract_slice %10[0, 0, 0] [%0, %1, 8640] [1, 1, 1] : tensor<?x?x8640xf32> to tensor<?x?x8640xf32> | |
flow.dispatch.tensor.store %extracted_slice, %4, offsets = [0, 0, 0], sizes = [%0, %1, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} | |
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%4 = flow.dispatch @matmul_broad_dispatch_0::@matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16[%0](%3, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>{%0} | |
%5 = flow.dispatch @matmul_broad_dispatch_1::@matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200[%0, %1](%2, %0, %1) : (tensor<?x?x3200xf32>{%0, %1}, index, index) -> %2 as tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>{%0, %1} | |
%6 = flow.dispatch @matmul_broad_dispatch_2::@matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32[%0, %1](%cst, %5, %4, %0, %1) : (f32, tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>{%0, %1}, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>{%0}, index, index) -> tensor<?x?x8640xf32>{%0, %1} | |
%7 = hal.tensor.export %6 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view | |
util.return %7 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
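// This dump is scoped to the function rather than the module, so the #map aliases are expanded inline as affine_map literals; the dispatch sequence itself is untouched by canonicalization. | |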
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} | |
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%4 = flow.dispatch @matmul_broad_dispatch_0::@matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16[%0](%3, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0} | |
%5 = flow.dispatch @matmul_broad_dispatch_1::@matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200[%0, %1](%2, %0, %1) : (tensor<?x?x3200xf32>{%0, %1}, index, index) -> %2 as tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0, %1} | |
%6 = flow.dispatch @matmul_broad_dispatch_2::@matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32[%0, %1](%cst, %5, %4, %0, %1) : (f32, tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0, %1}, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0}, index, index) -> tensor<?x?x8640xf32>{%0, %1} | |
%7 = hal.tensor.export %6 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view | |
util.return %7 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
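// CSE finds no redundant subexpressions in this function (each op result is already unique), leaving it unchanged. | |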
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} | |
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%4 = flow.dispatch @matmul_broad_dispatch_0::@matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16[%0](%3, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0} | |
%5 = flow.dispatch @matmul_broad_dispatch_1::@matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200[%0, %1](%2, %0, %1) : (tensor<?x?x3200xf32>{%0, %1}, index, index) -> %2 as tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0, %1} | |
%6 = flow.dispatch @matmul_broad_dispatch_2::@matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32[%0, %1](%cst, %5, %4, %0, %1) : (f32, tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0, %1}, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0}, index, index) -> tensor<?x?x8640xf32>{%0, %1} | |
%7 = hal.tensor.export %6 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view | |
util.return %7 : !hal.buffer_view | |
} | |
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- // | |
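// The entry function touches no util.global state, so simplifying global accesses is a no-op for it. | |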
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} | |
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%4 = flow.dispatch @matmul_broad_dispatch_0::@matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16[%0](%3, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0} | |
%5 = flow.dispatch @matmul_broad_dispatch_1::@matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200[%0, %1](%2, %0, %1) : (tensor<?x?x3200xf32>{%0, %1}, index, index) -> %2 as tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0, %1} | |
%6 = flow.dispatch @matmul_broad_dispatch_2::@matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32[%0, %1](%cst, %5, %4, %0, %1) : (f32, tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0, %1}, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0}, index, index) -> tensor<?x?x8640xf32>{%0, %1} | |
%7 = hal.tensor.export %6 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view | |
util.return %7 : !hal.buffer_view | |
} | |
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- // | |
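// Back to a module-scoped dump: the affine_map aliases reappear at the top, and pattern application appears to leave the three dispatches structurally intact. | |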
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "broadwell", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,-avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,+invpcid,+64bit,-xsavec,-avx10.1-512,-avx512vpopcntdq,+cmov,-avx512vp2intersect,-avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,+rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-vpclmulqdq,-avx512vl,-uintr,-cf,-clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,-gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,-avx512f,-amx-bf16,-avx512bf16,-avx512vnni,-push2pop2,+cx8,-avx512bw,+sse3,-pku,-nf,+fsgsbase,-clzero,-mwaitx,-lwp,+lzcnt,-sha,-movdir64b,-ppx,-wbnoinvd,-enqcmd,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,-avx512bitalg,-rdpru,-clwb,+mmx,+sse2,+rdseed,-avx512vbmi2,-prefetchi,-rdpid,-fma4,-avx512vbmi,-shstk,-vaes,-waitpkg,-sgx,+fxsr,-avx512dq,-sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 32 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)> | |
#map1 = affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)> | |
#map2 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)> | |
#map3 = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map4 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
flow.executable private @matmul_broad_dispatch_0 { | |
flow.executable.export public @matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16 workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16(%arg0: !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>>, %arg1: index, %arg2: !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>) { | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0} | |
%2 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%3 = tensor.empty(%0) : tensor<?x8640x3200xf16> | |
%4 = linalg.generic {indexing_maps = [#map3, #map4], iterator_types = ["parallel", "parallel", "parallel"]} ins(%2 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%5 = iree_encoding.set_encoding %4 : tensor<?x8640x3200xf16> -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
flow.dispatch.tensor.store %5, %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0} | |
return | |
} | |
} | |
} | |
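  // dispatch_1: encodes the LHS in place; the binding is readwrite and the
  // encoded tensor is stored back over the input, which is why the caller
  // ties the result to its operand (`-> %2 as tensor<...>`).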
flow.executable private @matmul_broad_dispatch_1 { | |
flow.executable.export public @matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200 workgroups(%arg0: index, %arg1: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200(%arg0: !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>, %arg1: index, %arg2: index) { | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.tie_shape %arg0 : !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} | |
%3 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} -> tensor<?x?x3200xf32> | |
%4 = iree_encoding.set_encoding %3 : tensor<?x?x3200xf32> -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
flow.dispatch.tensor.store %4, %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
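  // dispatch_2: the mixed-precision batch_matmul_transpose_b (f32 <- f32 x
  // f16) on the encoded operands, followed by unset_encoding plus an
  // extract_slice that trims any encoding padding back to the logical
  // ?x?x8640 result.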
flow.executable private @matmul_broad_dispatch_2 { | |
flow.executable.export public @matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32(%arg0: f32, %arg1: !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>, %arg2: !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>, %arg3: index, %arg4: index, %arg5: !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>) { | |
%0 = flow.dispatch.workload.ordinal %arg3, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg4, 1 : index | |
%2 = flow.dispatch.tie_shape %arg1 : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0, %1} | |
%3 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0} | |
%4 = flow.dispatch.tie_shape %arg5 : !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%0, %1} | |
%5 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0, %1} -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%6 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0} -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%7 = tensor.empty(%0, %1) : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%8 = linalg.fill ins(%arg0 : f32) outs(%7 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%9 = linalg.batch_matmul_transpose_b ins(%5, %6 : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) outs(%8 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%10 = iree_encoding.unset_encoding %9 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> tensor<?x?x8640xf32> | |
%extracted_slice = tensor.extract_slice %10[0, 0, 0] [%0, %1, 8640] [1, 1, 1] : tensor<?x?x8640xf32> to tensor<?x?x8640xf32> | |
flow.dispatch.tensor.store %extracted_slice, %4, offsets = [0, 0, 0], sizes = [%0, %1, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
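  // Host entry point: imports the two buffer views, feeds the dynamic dims
  // %0 and %1 to the three dispatches as workload parameters, and exports
  // the result as a buffer view.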
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} | |
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%4 = flow.dispatch @matmul_broad_dispatch_0::@matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16[%0](%3, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>{%0} | |
%5 = flow.dispatch @matmul_broad_dispatch_1::@matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200[%0, %1](%2, %0, %1) : (tensor<?x?x3200xf32>{%0, %1}, index, index) -> %2 as tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>{%0, %1} | |
%6 = flow.dispatch @matmul_broad_dispatch_2::@matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32[%0, %1](%cst, %5, %4, %0, %1) : (f32, tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>{%0, %1}, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>{%0}, index, index) -> tensor<?x?x8640xf32>{%0, %1} | |
%7 = hal.tensor.export %6 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view | |
util.return %7 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- // | |
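// No foldable globals beyond @__device_0; the module is identical to the dump
// above.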
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- // | |
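// Nothing to fuse; the module is again unchanged.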
// -----// IR Dump After IPO (iree-util-ipo) //----- // | |
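// With a single public function there is no interprocedural rewriting to do;
// the module is unchanged.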
// -----// IR Dump After ConvertToStreamPass (iree-stream-conversion) //----- // | |
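// flow -> stream: flow.executable becomes stream.executable and tensor
// dispatch operands become opaque !stream.binding arguments, resolved inside
// each dispatch via stream.binding.subspan before the usual
// flow.dispatch.tensor.load/store ops.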
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "broadwell", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,-avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,+invpcid,+64bit,-xsavec,-avx10.1-512,-avx512vpopcntdq,+cmov,-avx512vp2intersect,-avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,+rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-vpclmulqdq,-avx512vl,-uintr,-cf,-clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,-gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,-avx512f,-amx-bf16,-avx512bf16,-avx512vnni,-push2pop2,+cx8,-avx512bw,+sse3,-pku,-nf,+fsgsbase,-clzero,-mwaitx,-lwp,+lzcnt,-sha,-movdir64b,-ppx,-wbnoinvd,-enqcmd,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,-avx512bitalg,-rdpru,-clwb,+mmx,+sse2,+rdseed,-avx512vbmi2,-prefetchi,-rdpid,-fma4,-avx512vbmi,-shstk,-vaes,-waitpkg,-sgx,+fxsr,-avx512dq,-sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 32 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)> | |
#map1 = affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)> | |
#map2 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)> | |
#map3 = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map4 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
stream.executable private @matmul_broad_dispatch_0 { | |
stream.executable.export public @matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16 workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%5 = linalg.generic {indexing_maps = [#map3, #map4], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%6 = iree_encoding.set_encoding %5 : tensor<?x8640x3200xf16> -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
flow.dispatch.tensor.store %6, %2, offsets = [0, 0, 0], sizes = [%1, 8640, 3200], strides = [1, 1, 1] : tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%1} | |
return | |
} | |
} | |
} | |
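// NOTE: dispatch_0 fuses the Dx8640x3200 broadcast of the f16 weights with
// iree_encoding.set_encoding, so the RHS is materialized directly in its
// encoded form instead of round-tripping an unencoded copy through memory.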
stream.executable private @matmul_broad_dispatch_1 { | |
stream.executable.export public @matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200 workgroups(%arg0: index, %arg1: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200(%arg0: !stream.binding, %arg1: index, %arg2: index) { | |
%c0 = arith.constant 0 : index | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} | |
%3 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} -> tensor<?x?x3200xf32> | |
%4 = iree_encoding.set_encoding %3 : tensor<?x?x3200xf32> -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
flow.dispatch.tensor.store %4, %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
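// NOTE: dispatch_1 sets the LHS encoding in place: the binding is declared
// readwrite and the store targets the same subspan that was loaded, which is
// why the caller below ties the dispatch result to its input (-> %4{%2})
// rather than allocating a fresh resource.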
stream.executable private @matmul_broad_dispatch_2 { | |
stream.executable.export public @matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32(%arg0: f32, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = flow.dispatch.workload.ordinal %arg3, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg4, 1 : index | |
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0, %1} | |
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0} | |
%4 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%0, %1} | |
%5 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0, %1} -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%6 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0} -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%7 = tensor.empty(%0, %1) : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%8 = linalg.fill ins(%arg0 : f32) outs(%7 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%9 = linalg.batch_matmul_transpose_b ins(%5, %6 : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) outs(%8 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%10 = iree_encoding.unset_encoding %9 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> tensor<?x?x8640xf32> | |
%extracted_slice = tensor.extract_slice %10[0, 0, 0] [%0, %1, 8640] [1, 1, 1] : tensor<?x?x8640xf32> to tensor<?x?x8640xf32> | |
flow.dispatch.tensor.store %extracted_slice, %4, offsets = [0, 0, 0], sizes = [%0, %1, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
%c3200 = arith.constant 3200 : index | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?x3200xf32>{%0, %1} : index | |
%3 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%2} | |
%4 = stream.async.transfer %3 : !stream.resource<external>{%2} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%2} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major_0 = hal.encoding_type<dense_row_major> : i32 | |
%c8640 = arith.constant 8640 : index | |
%c3200_1 = arith.constant 3200 : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200_1]) type(%element_type_f16) encoding(%dense_row_major_0) | |
%5 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<8640x3200xf16> : index | |
%6 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%5} | |
%7 = stream.async.transfer %6 : !stream.resource<external>{%5} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%5} | |
%c0 = arith.constant 0 : index | |
%8 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>{%0} : index | |
%9 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_0::@matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16[%0](%7[%c0 to %5 for %5], %0) : (!stream.resource<*>{%5}, index) -> !stream.resource<*>{%8} | |
%c0_2 = arith.constant 0 : index | |
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_1::@matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200[%0, %1](%4[%c0_2 to %2 for %2], %0, %1) : (!stream.resource<*>{%2}, index, index) -> %4{%2} | |
%c0_3 = arith.constant 0 : index | |
%11 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?x8640xf32>{%0, %1} : index | |
%12 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_2::@matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32[%0, %1](%cst, %10[%c0_3 to %2 for %2], %9[%c0_3 to %8 for %8], %0, %1) : (f32, !stream.resource<*>{%2}, !stream.resource<*>{%8}, index, index) -> !stream.resource<*>{%11} | |
%13 = stream.async.transfer %12 : !stream.resource<*>{%11} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%11} | |
%14 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %13 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%11} -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
} | |
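// NOTE: ConvertToStreamPass makes storage explicit: every tensor gets a byte
// size via stream.tensor.sizeof, the hal imports/exports become
// stream.tensor.import/export plus stream.async.transfer, and each
// flow.dispatch becomes a stream.async.dispatch over !stream.resource values
// with explicit [offset to end for length] operand ranges. A minimal sketch
// of the resulting pattern, assuming a hypothetical executable @ex::@entry
// (not part of this dump):
//   %sz = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<4xf32> : index
//   %out = stream.async.dispatch on(#hal.device.affinity<@__device_0>)
//          @ex::@entry(%res[%c0 to %sz for %sz]) : (!stream.resource<*>{%sz}) -> !stream.resource<*>{%sz}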
// -----// IR Dump After VerifyLoweringToTensorsPass (iree-stream-verify-lowering-to-tensors) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "broadwell", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,-avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,+invpcid,+64bit,-xsavec,-avx10.1-512,-avx512vpopcntdq,+cmov,-avx512vp2intersect,-avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,+rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-vpclmulqdq,-avx512vl,-uintr,-cf,-clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,-gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,-avx512f,-amx-bf16,-avx512bf16,-avx512vnni,-push2pop2,+cx8,-avx512bw,+sse3,-pku,-nf,+fsgsbase,-clzero,-mwaitx,-lwp,+lzcnt,-sha,-movdir64b,-ppx,-wbnoinvd,-enqcmd,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,-avx512bitalg,-rdpru,-clwb,+mmx,+sse2,+rdseed,-avx512vbmi2,-prefetchi,-rdpid,-fma4,-avx512vbmi,-shstk,-vaes,-waitpkg,-sgx,+fxsr,-avx512dq,-sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 32 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)> | |
#map1 = affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)> | |
#map2 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)> | |
#map3 = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map4 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
stream.executable private @matmul_broad_dispatch_0 { | |
stream.executable.export public @matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16 workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%5 = linalg.generic {indexing_maps = [#map3, #map4], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%6 = iree_encoding.set_encoding %5 : tensor<?x8640x3200xf16> -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
flow.dispatch.tensor.store %6, %2, offsets = [0, 0, 0], sizes = [%1, 8640, 3200], strides = [1, 1, 1] : tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%1} | |
return | |
} | |
} | |
} | |
stream.executable private @matmul_broad_dispatch_1 { | |
stream.executable.export public @matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200 workgroups(%arg0: index, %arg1: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200(%arg0: !stream.binding, %arg1: index, %arg2: index) { | |
%c0 = arith.constant 0 : index | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} | |
%3 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} -> tensor<?x?x3200xf32> | |
%4 = iree_encoding.set_encoding %3 : tensor<?x?x3200xf32> -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
flow.dispatch.tensor.store %4, %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
stream.executable private @matmul_broad_dispatch_2 { | |
stream.executable.export public @matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32(%arg0: f32, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = flow.dispatch.workload.ordinal %arg3, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg4, 1 : index | |
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0, %1} | |
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0} | |
%4 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%0, %1} | |
%5 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0, %1} -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%6 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0} -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%7 = tensor.empty(%0, %1) : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%8 = linalg.fill ins(%arg0 : f32) outs(%7 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%9 = linalg.batch_matmul_transpose_b ins(%5, %6 : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) outs(%8 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%10 = iree_encoding.unset_encoding %9 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> tensor<?x?x8640xf32> | |
%extracted_slice = tensor.extract_slice %10[0, 0, 0] [%0, %1, 8640] [1, 1, 1] : tensor<?x?x8640xf32> to tensor<?x?x8640xf32> | |
flow.dispatch.tensor.store %extracted_slice, %4, offsets = [0, 0, 0], sizes = [%0, %1, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
%c3200 = arith.constant 3200 : index | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?x3200xf32>{%0, %1} : index | |
%3 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%2} | |
%4 = stream.async.transfer %3 : !stream.resource<external>{%2} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%2} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major_0 = hal.encoding_type<dense_row_major> : i32 | |
%c8640 = arith.constant 8640 : index | |
%c3200_1 = arith.constant 3200 : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200_1]) type(%element_type_f16) encoding(%dense_row_major_0) | |
%5 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<8640x3200xf16> : index | |
%6 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%5} | |
%7 = stream.async.transfer %6 : !stream.resource<external>{%5} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%5} | |
%c0 = arith.constant 0 : index | |
%8 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>{%0} : index | |
%9 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_0::@matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16[%0](%7[%c0 to %5 for %5], %0) : (!stream.resource<*>{%5}, index) -> !stream.resource<*>{%8} | |
%c0_2 = arith.constant 0 : index | |
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_1::@matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200[%0, %1](%4[%c0_2 to %2 for %2], %0, %1) : (!stream.resource<*>{%2}, index, index) -> %4{%2} | |
%c0_3 = arith.constant 0 : index | |
%11 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?x8640xf32>{%0, %1} : index | |
%12 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_2::@matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32[%0, %1](%cst, %10[%c0_3 to %2 for %2], %9[%c0_3 to %8 for %8], %0, %1) : (f32, !stream.resource<*>{%2}, !stream.resource<*>{%8}, index, index) -> !stream.resource<*>{%11} | |
%13 = stream.async.transfer %12 : !stream.resource<*>{%11} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%11} | |
%14 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %13 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%11} -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
} | |
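// NOTE: VerifyLoweringToTensorsPass is verification-only; the module above is
// identical to the previous dump, confirming the pass made no IR changes.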
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
func.func @matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%6 = iree_encoding.set_encoding %5 : tensor<?x8640x3200xf16> -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> | |
flow.dispatch.tensor.store %6, %2, offsets = [0, 0, 0], sizes = [%1, 8640, 3200], strides = [1, 1, 1] : tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>{%1} | |
return | |
} | |
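// NOTE: Canonicalization leaves dispatch_0 unchanged; the affine maps now
// print inline only because this dump is at function scope, where the
// module-level #map attribute aliases are not emitted.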
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
func.func @matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200(%arg0: !stream.binding, %arg1: index, %arg2: index) { | |
%c0 = arith.constant 0 : index | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} | |
%3 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} -> tensor<?x?x3200xf32> | |
%4 = iree_encoding.set_encoding %3 : tensor<?x?x3200xf32> -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> | |
flow.dispatch.tensor.store %4, %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} | |
return | |
} | |
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
func.func @matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32(%arg0: f32, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = flow.dispatch.workload.ordinal %arg3, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg4, 1 : index | |
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>{%0, %1} | |
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>{%0} | |
%4 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%0, %1} | |
%5 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>{%0, %1} -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> | |
%6 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>{%0} -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> | |
%7 = tensor.empty(%0, %1) : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> | |
%8 = linalg.fill ins(%arg0 : f32) outs(%7 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> | |
%9 = linalg.batch_matmul_transpose_b ins(%5, %6 : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>) outs(%8 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> | |
%10 = iree_encoding.unset_encoding %9 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> -> tensor<?x?x8640xf32> | |
%extracted_slice = tensor.extract_slice %10[0, 0, 0] [%0, %1, 8640] [1, 1, 1] : tensor<?x?x8640xf32> to tensor<?x?x8640xf32> | |
flow.dispatch.tensor.store %extracted_slice, %4, offsets = [0, 0, 0], sizes = [%0, %1, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%0, %1} | |
return | |
} | |
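// NOTE: dispatch_2 is the core kernel: linalg.fill seeds the encoded
// accumulator with %arg0 (the 0.0 splat passed in as a scalar operand) and
// linalg.batch_matmul_transpose_b runs entirely on encoded tensors. The
// trailing tensor.extract_slice clips the unset_encoding result back to the
// logical {%0, %1, 8640} extent in case the encoded layout was padded.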
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?x3200xf32>{%0, %1} : index | |
%3 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%2} | |
%4 = stream.async.transfer %3 : !stream.resource<external>{%2} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%2} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major_0 = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major_0) | |
%5 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<8640x3200xf16> : index | |
%6 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%5} | |
%7 = stream.async.transfer %6 : !stream.resource<external>{%5} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%5} | |
%8 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0} : index | |
%9 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_0::@matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16[%0](%7[%c0 to %5 for %5], %0) : (!stream.resource<*>{%5}, index) -> !stream.resource<*>{%8} | |
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_1::@matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200[%0, %1](%4[%c0 to %2 for %2], %0, %1) : (!stream.resource<*>{%2}, index, index) -> %4{%2} | |
%11 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?x8640xf32>{%0, %1} : index | |
%12 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_2::@matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32[%0, %1](%cst, %10[%c0 to %2 for %2], %9[%c0 to %8 for %8], %0, %1) : (f32, !stream.resource<*>{%2}, !stream.resource<*>{%8}, index, index) -> !stream.resource<*>{%11} | |
%13 = stream.async.transfer %12 : !stream.resource<*>{%11} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%11} | |
%14 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %13 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%11} -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
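// NOTE: At function scope the canonicalizer deduplicates and hoists the
// constants: the separate copies of %c0 and the duplicate %c3200 from the
// previous dump collapse into single definitions at the top of @matmul_broad.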
// -----// IR Dump After Inliner (inline) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "broadwell", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,-avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,+invpcid,+64bit,-xsavec,-avx10.1-512,-avx512vpopcntdq,+cmov,-avx512vp2intersect,-avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,+rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-vpclmulqdq,-avx512vl,-uintr,-cf,-clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,-gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,-avx512f,-amx-bf16,-avx512bf16,-avx512vnni,-push2pop2,+cx8,-avx512bw,+sse3,-pku,-nf,+fsgsbase,-clzero,-mwaitx,-lwp,+lzcnt,-sha,-movdir64b,-ppx,-wbnoinvd,-enqcmd,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,-avx512bitalg,-rdpru,-clwb,+mmx,+sse2,+rdseed,-avx512vbmi2,-prefetchi,-rdpid,-fma4,-avx512vbmi,-shstk,-vaes,-waitpkg,-sgx,+fxsr,-avx512dq,-sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 32 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)> | |
#map1 = affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)> | |
#map2 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)> | |
#map3 = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map4 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
stream.executable private @matmul_broad_dispatch_0 { | |
stream.executable.export public @matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16 workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%5 = linalg.generic {indexing_maps = [#map3, #map4], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%6 = iree_encoding.set_encoding %5 : tensor<?x8640x3200xf16> -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
flow.dispatch.tensor.store %6, %2, offsets = [0, 0, 0], sizes = [%1, 8640, 3200], strides = [1, 1, 1] : tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%1} | |
return | |
} | |
} | |
} | |
stream.executable private @matmul_broad_dispatch_1 { | |
stream.executable.export public @matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200 workgroups(%arg0: index, %arg1: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200(%arg0: !stream.binding, %arg1: index, %arg2: index) { | |
%c0 = arith.constant 0 : index | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} | |
%3 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} -> tensor<?x?x3200xf32> | |
%4 = iree_encoding.set_encoding %3 : tensor<?x?x3200xf32> -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
flow.dispatch.tensor.store %4, %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
stream.executable private @matmul_broad_dispatch_2 { | |
stream.executable.export public @matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32(%arg0: f32, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = flow.dispatch.workload.ordinal %arg3, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg4, 1 : index | |
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0, %1} | |
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0} | |
%4 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%0, %1} | |
%5 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0, %1} -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%6 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0} -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%7 = tensor.empty(%0, %1) : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%8 = linalg.fill ins(%arg0 : f32) outs(%7 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%9 = linalg.batch_matmul_transpose_b ins(%5, %6 : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) outs(%8 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%10 = iree_encoding.unset_encoding %9 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> tensor<?x?x8640xf32> | |
%extracted_slice = tensor.extract_slice %10[0, 0, 0] [%0, %1, 8640] [1, 1, 1] : tensor<?x?x8640xf32> to tensor<?x?x8640xf32> | |
flow.dispatch.tensor.store %extracted_slice, %4, offsets = [0, 0, 0], sizes = [%0, %1, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?x3200xf32>{%0, %1} : index | |
%3 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%2} | |
%4 = stream.async.transfer %3 : !stream.resource<external>{%2} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%2} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major_0 = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major_0) | |
%5 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<8640x3200xf16> : index | |
%6 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%5} | |
%7 = stream.async.transfer %6 : !stream.resource<external>{%5} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%5} | |
%8 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>{%0} : index | |
%9 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_0::@matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16[%0](%7[%c0 to %5 for %5], %0) : (!stream.resource<*>{%5}, index) -> !stream.resource<*>{%8} | |
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_1::@matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200[%0, %1](%4[%c0 to %2 for %2], %0, %1) : (!stream.resource<*>{%2}, index, index) -> %4{%2} | |
%11 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?x8640xf32>{%0, %1} : index | |
%12 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_2::@matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32[%0, %1](%cst, %10[%c0 to %2 for %2], %9[%c0 to %8 for %8], %0, %1) : (f32, !stream.resource<*>{%2}, !stream.resource<*>{%8}, index, index) -> !stream.resource<*>{%11} | |
%13 = stream.async.transfer %12 : !stream.resource<*>{%11} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%11} | |
%14 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %13 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%11} -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
} | |
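// [annotation, not compiler output] The module above shows the program after
// data-tiling encodings have been materialized into three dispatches:
//   - @matmul_broad_dispatch_0 broadcasts the 8640x3200xf16 RHS across the
//     dynamic batch dimension and applies the operand_index = 1 encoding;
//   - @matmul_broad_dispatch_1 re-encodes the LHS in place (note the
//     readwrite binding and the tied `-> %4{%2}` result at its dispatch site);
//   - @matmul_broad_dispatch_2 runs the encoded batch_matmul_transpose_b,
//     strips the encoding via iree_encoding.unset_encoding, and writes the
//     ?x?x8640xf32 result.
// On the host side, the entry point asserts the buffer_view shapes and element
// types, imports both buffers as stream resources, sizes the encoded tensors
// with stream.tensor.sizeof, and exports the final resource as a buffer_view.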
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
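// [annotation] Per-function dump, so only @matmul_broad is printed and the
// encoding attribute on the stream.tensor.sizeof of the encoded RHS is spelled
// with inline affine_maps instead of the module-level #map aliases. Judging by
// a diff against the previous dump, canonicalization changes nothing material
// here; the duplicate hal.encoding_type<dense_row_major> constant survives
// until CSE below.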
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?x3200xf32>{%0, %1} : index | |
%3 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%2} | |
%4 = stream.async.transfer %3 : !stream.resource<external>{%2} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%2} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major_0 = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major_0) | |
%5 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<8640x3200xf16> : index | |
%6 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%5} | |
%7 = stream.async.transfer %6 : !stream.resource<external>{%5} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%5} | |
%8 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0} : index | |
%9 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_0::@matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16[%0](%7[%c0 to %5 for %5], %0) : (!stream.resource<*>{%5}, index) -> !stream.resource<*>{%8} | |
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_1::@matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200[%0, %1](%4[%c0 to %2 for %2], %0, %1) : (!stream.resource<*>{%2}, index, index) -> %4{%2} | |
%11 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?x8640xf32>{%0, %1} : index | |
%12 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_2::@matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32[%0, %1](%cst, %10[%c0 to %2 for %2], %9[%c0 to %8 for %8], %0, %1) : (f32, !stream.resource<*>{%2}, !stream.resource<*>{%8}, index, index) -> !stream.resource<*>{%11} | |
%13 = stream.async.transfer %12 : !stream.resource<*>{%11} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%11} | |
%14 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %13 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%11} -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
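// [annotation] CSE's one visible effect: the two identical
// hal.encoding_type<dense_row_major> constants are merged, so the second
// hal.buffer_view.assert now reuses %dense_row_major in place of the former
// %dense_row_major_0.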
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?x3200xf32>{%0, %1} : index | |
%3 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%2} | |
%4 = stream.async.transfer %3 : !stream.resource<external>{%2} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%2} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%5 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<8640x3200xf16> : index | |
%6 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%5} | |
%7 = stream.async.transfer %6 : !stream.resource<external>{%5} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%5} | |
%8 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0} : index | |
%9 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_0::@matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16[%0](%7[%c0 to %5 for %5], %0) : (!stream.resource<*>{%5}, index) -> !stream.resource<*>{%8} | |
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_1::@matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200[%0, %1](%4[%c0 to %2 for %2], %0, %1) : (!stream.resource<*>{%2}, index, index) -> %4{%2} | |
%11 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?x8640xf32>{%0, %1} : index | |
%12 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_2::@matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32[%0, %1](%cst, %10[%c0 to %2 for %2], %9[%c0 to %8 for %8], %0, %1) : (f32, !stream.resource<*>{%2}, !stream.resource<*>{%8}, index, index) -> !stream.resource<*>{%11} | |
%13 = stream.async.transfer %12 : !stream.resource<*>{%11} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%11} | |
%14 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %13 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%11} -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- // | |
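// [annotation] Appears to be a no-op for this function: it only references
// @__device_0 symbolically through #hal.device.affinity attributes, so there
// are no explicit global load/store sequences for the pass to simplify.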
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?x3200xf32>{%0, %1} : index | |
%3 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%2} | |
%4 = stream.async.transfer %3 : !stream.resource<external>{%2} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%2} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%5 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<8640x3200xf16> : index | |
%6 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%5} | |
%7 = stream.async.transfer %6 : !stream.resource<external>{%5} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%5} | |
%8 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>{%0} : index | |
%9 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_0::@matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16[%0](%7[%c0 to %5 for %5], %0) : (!stream.resource<*>{%5}, index) -> !stream.resource<*>{%8} | |
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_1::@matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200[%0, %1](%4[%c0 to %2 for %2], %0, %1) : (!stream.resource<*>{%2}, index, index) -> %4{%2} | |
%11 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?x8640xf32>{%0, %1} : index | |
%12 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_2::@matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32[%0, %1](%cst, %10[%c0 to %2 for %2], %9[%c0 to %8 for %8], %0, %1) : (f32, !stream.resource<*>{%2}, !stream.resource<*>{%8}, index, index) -> !stream.resource<*>{%11} | |
%13 = stream.async.transfer %12 : !stream.resource<*>{%11} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%11} | |
%14 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %13 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%11} -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- // | |
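// [annotation] First whole-module dump in this stretch: the attribute aliases
// (#map through #map4), the broadwell x86_64 executable target, and the
// #device_target_local global come back into view. A textual diff against the
// earlier module suggests iree-util-apply-patterns leaves the IR unchanged
// here.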
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "broadwell", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,-avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,+invpcid,+64bit,-xsavec,-avx10.1-512,-avx512vpopcntdq,+cmov,-avx512vp2intersect,-avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,+rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-vpclmulqdq,-avx512vl,-uintr,-cf,-clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,-gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,-avx512f,-amx-bf16,-avx512bf16,-avx512vnni,-push2pop2,+cx8,-avx512bw,+sse3,-pku,-nf,+fsgsbase,-clzero,-mwaitx,-lwp,+lzcnt,-sha,-movdir64b,-ppx,-wbnoinvd,-enqcmd,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,-avx512bitalg,-rdpru,-clwb,+mmx,+sse2,+rdseed,-avx512vbmi2,-prefetchi,-rdpid,-fma4,-avx512vbmi,-shstk,-vaes,-waitpkg,-sgx,+fxsr,-avx512dq,-sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 32 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)> | |
#map1 = affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)> | |
#map2 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)> | |
#map3 = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map4 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
stream.executable private @matmul_broad_dispatch_0 { | |
stream.executable.export public @matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16 workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%5 = linalg.generic {indexing_maps = [#map3, #map4], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%6 = iree_encoding.set_encoding %5 : tensor<?x8640x3200xf16> -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
flow.dispatch.tensor.store %6, %2, offsets = [0, 0, 0], sizes = [%1, 8640, 3200], strides = [1, 1, 1] : tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%1} | |
return | |
} | |
} | |
} | |
stream.executable private @matmul_broad_dispatch_1 { | |
stream.executable.export public @matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200 workgroups(%arg0: index, %arg1: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200(%arg0: !stream.binding, %arg1: index, %arg2: index) { | |
%c0 = arith.constant 0 : index | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} | |
%3 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} -> tensor<?x?x3200xf32> | |
%4 = iree_encoding.set_encoding %3 : tensor<?x?x3200xf32> -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
flow.dispatch.tensor.store %4, %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
stream.executable private @matmul_broad_dispatch_2 { | |
stream.executable.export public @matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32(%arg0: f32, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = flow.dispatch.workload.ordinal %arg3, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg4, 1 : index | |
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0, %1} | |
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0} | |
%4 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%0, %1} | |
%5 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0, %1} -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%6 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0} -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%7 = tensor.empty(%0, %1) : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%8 = linalg.fill ins(%arg0 : f32) outs(%7 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%9 = linalg.batch_matmul_transpose_b ins(%5, %6 : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) outs(%8 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%10 = iree_encoding.unset_encoding %9 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> tensor<?x?x8640xf32> | |
%extracted_slice = tensor.extract_slice %10[0, 0, 0] [%0, %1, 8640] [1, 1, 1] : tensor<?x?x8640xf32> to tensor<?x?x8640xf32> | |
flow.dispatch.tensor.store %extracted_slice, %4, offsets = [0, 0, 0], sizes = [%0, %1, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?x3200xf32>{%0, %1} : index | |
%3 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%2} | |
%4 = stream.async.transfer %3 : !stream.resource<external>{%2} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%2} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%5 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<8640x3200xf16> : index | |
%6 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%5} | |
%7 = stream.async.transfer %6 : !stream.resource<external>{%5} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%5} | |
%8 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>{%0} : index | |
%9 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_0::@matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16[%0](%7[%c0 to %5 for %5], %0) : (!stream.resource<*>{%5}, index) -> !stream.resource<*>{%8} | |
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_1::@matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200[%0, %1](%4[%c0 to %2 for %2], %0, %1) : (!stream.resource<*>{%2}, index, index) -> %4{%2} | |
%11 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?x8640xf32>{%0, %1} : index | |
%12 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_2::@matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32[%0, %1](%cst, %10[%c0 to %2 for %2], %9[%c0 to %8 for %8], %0, %1) : (f32, !stream.resource<*>{%2}, !stream.resource<*>{%8}, index, index) -> !stream.resource<*>{%11} | |
%13 = stream.async.transfer %12 : !stream.resource<*>{%11} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%11} | |
%14 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %13 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%11} -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- // | |
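// [annotation] With a single immutable util.global (@__device_0) there is
// nothing for global folding to do; the module below appears identical to the
// previous dump.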
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "broadwell", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,-avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,+invpcid,+64bit,-xsavec,-avx10.1-512,-avx512vpopcntdq,+cmov,-avx512vp2intersect,-avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,+rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-vpclmulqdq,-avx512vl,-uintr,-cf,-clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,-gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,-avx512f,-amx-bf16,-avx512bf16,-avx512vnni,-push2pop2,+cx8,-avx512bw,+sse3,-pku,-nf,+fsgsbase,-clzero,-mwaitx,-lwp,+lzcnt,-sha,-movdir64b,-ppx,-wbnoinvd,-enqcmd,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,-avx512bitalg,-rdpru,-clwb,+mmx,+sse2,+rdseed,-avx512vbmi2,-prefetchi,-rdpid,-fma4,-avx512vbmi,-shstk,-vaes,-waitpkg,-sgx,+fxsr,-avx512dq,-sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 32 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)> | |
#map1 = affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)> | |
#map2 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)> | |
#map3 = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map4 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
stream.executable private @matmul_broad_dispatch_0 { | |
stream.executable.export public @matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16 workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%5 = linalg.generic {indexing_maps = [#map3, #map4], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%6 = iree_encoding.set_encoding %5 : tensor<?x8640x3200xf16> -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
flow.dispatch.tensor.store %6, %2, offsets = [0, 0, 0], sizes = [%1, 8640, 3200], strides = [1, 1, 1] : tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%1} | |
return | |
} | |
} | |
} | |
stream.executable private @matmul_broad_dispatch_1 { | |
stream.executable.export public @matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200 workgroups(%arg0: index, %arg1: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200(%arg0: !stream.binding, %arg1: index, %arg2: index) { | |
%c0 = arith.constant 0 : index | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} | |
%3 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} -> tensor<?x?x3200xf32> | |
%4 = iree_encoding.set_encoding %3 : tensor<?x?x3200xf32> -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
flow.dispatch.tensor.store %4, %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
stream.executable private @matmul_broad_dispatch_2 { | |
stream.executable.export public @matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32(%arg0: f32, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = flow.dispatch.workload.ordinal %arg3, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg4, 1 : index | |
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0, %1} | |
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0} | |
%4 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%0, %1} | |
%5 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0, %1} -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%6 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0} -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%7 = tensor.empty(%0, %1) : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%8 = linalg.fill ins(%arg0 : f32) outs(%7 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%9 = linalg.batch_matmul_transpose_b ins(%5, %6 : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) outs(%8 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%10 = iree_encoding.unset_encoding %9 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> tensor<?x?x8640xf32> | |
%extracted_slice = tensor.extract_slice %10[0, 0, 0] [%0, %1, 8640] [1, 1, 1] : tensor<?x?x8640xf32> to tensor<?x?x8640xf32> | |
flow.dispatch.tensor.store %extracted_slice, %4, offsets = [0, 0, 0], sizes = [%0, %1, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?x3200xf32>{%0, %1} : index | |
%3 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%2} | |
%4 = stream.async.transfer %3 : !stream.resource<external>{%2} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%2} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%5 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<8640x3200xf16> : index | |
%6 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%5} | |
%7 = stream.async.transfer %6 : !stream.resource<external>{%5} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%5} | |
%8 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>{%0} : index | |
%9 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_0::@matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16[%0](%7[%c0 to %5 for %5], %0) : (!stream.resource<*>{%5}, index) -> !stream.resource<*>{%8} | |
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_1::@matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200[%0, %1](%4[%c0 to %2 for %2], %0, %1) : (!stream.resource<*>{%2}, index, index) -> %4{%2} | |
%11 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?x8640xf32>{%0, %1} : index | |
%12 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_2::@matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32[%0, %1](%cst, %10[%c0 to %2 for %2], %9[%c0 to %8 for %8], %0, %1) : (f32, !stream.resource<*>{%2}, !stream.resource<*>{%8}, index, index) -> !stream.resource<*>{%11} | |
%13 = stream.async.transfer %12 : !stream.resource<*>{%11} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%11} | |
%14 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %13 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%11} -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- // | |
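// [annotation] Likewise a no-op here: global fusion looks for multiple
// correlated globals holding the same value, and this module defines only
// @__device_0, so the IR below appears unchanged.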
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "broadwell", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,-avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,+invpcid,+64bit,-xsavec,-avx10.1-512,-avx512vpopcntdq,+cmov,-avx512vp2intersect,-avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,+rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-vpclmulqdq,-avx512vl,-uintr,-cf,-clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,-gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,-avx512f,-amx-bf16,-avx512bf16,-avx512vnni,-push2pop2,+cx8,-avx512bw,+sse3,-pku,-nf,+fsgsbase,-clzero,-mwaitx,-lwp,+lzcnt,-sha,-movdir64b,-ppx,-wbnoinvd,-enqcmd,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,-avx512bitalg,-rdpru,-clwb,+mmx,+sse2,+rdseed,-avx512vbmi2,-prefetchi,-rdpid,-fma4,-avx512vbmi,-shstk,-vaes,-waitpkg,-sgx,+fxsr,-avx512dq,-sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 32 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)> | |
#map1 = affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)> | |
#map2 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)> | |
#map3 = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map4 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
stream.executable private @matmul_broad_dispatch_0 { | |
stream.executable.export public @matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16 workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%5 = linalg.generic {indexing_maps = [#map3, #map4], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%6 = iree_encoding.set_encoding %5 : tensor<?x8640x3200xf16> -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
flow.dispatch.tensor.store %6, %2, offsets = [0, 0, 0], sizes = [%1, 8640, 3200], strides = [1, 1, 1] : tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%1} | |
return | |
} | |
} | |
} | |
stream.executable private @matmul_broad_dispatch_1 { | |
stream.executable.export public @matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200 workgroups(%arg0: index, %arg1: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200(%arg0: !stream.binding, %arg1: index, %arg2: index) { | |
%c0 = arith.constant 0 : index | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} | |
%3 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} -> tensor<?x?x3200xf32> | |
%4 = iree_encoding.set_encoding %3 : tensor<?x?x3200xf32> -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
flow.dispatch.tensor.store %4, %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
stream.executable private @matmul_broad_dispatch_2 { | |
stream.executable.export public @matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32(%arg0: f32, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = flow.dispatch.workload.ordinal %arg3, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg4, 1 : index | |
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0, %1} | |
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0} | |
%4 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%0, %1} | |
%5 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0, %1} -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%6 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0} -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%7 = tensor.empty(%0, %1) : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%8 = linalg.fill ins(%arg0 : f32) outs(%7 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%9 = linalg.batch_matmul_transpose_b ins(%5, %6 : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) outs(%8 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%10 = iree_encoding.unset_encoding %9 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> tensor<?x?x8640xf32> | |
%extracted_slice = tensor.extract_slice %10[0, 0, 0] [%0, %1, 8640] [1, 1, 1] : tensor<?x?x8640xf32> to tensor<?x?x8640xf32> | |
flow.dispatch.tensor.store %extracted_slice, %4, offsets = [0, 0, 0], sizes = [%0, %1, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?x3200xf32>{%0, %1} : index | |
%3 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%2} | |
%4 = stream.async.transfer %3 : !stream.resource<external>{%2} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%2} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%5 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<8640x3200xf16> : index | |
%6 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%5} | |
%7 = stream.async.transfer %6 : !stream.resource<external>{%5} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%5} | |
%8 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>{%0} : index | |
%9 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_0::@matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16[%0](%7[%c0 to %5 for %5], %0) : (!stream.resource<*>{%5}, index) -> !stream.resource<*>{%8} | |
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_1::@matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200[%0, %1](%4[%c0 to %2 for %2], %0, %1) : (!stream.resource<*>{%2}, index, index) -> %4{%2} | |
%11 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?x8640xf32>{%0, %1} : index | |
%12 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_2::@matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32[%0, %1](%cst, %10[%c0 to %2 for %2], %9[%c0 to %8 for %8], %0, %1) : (f32, !stream.resource<*>{%2}, !stream.resource<*>{%8}, index, index) -> !stream.resource<*>{%11} | |
%13 = stream.async.transfer %12 : !stream.resource<*>{%11} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%11} | |
%14 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %13 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%11} -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
} | |
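// NOTE: the host function above queries the two dynamic dims of input0, asserts both buffer
// views (shape, element type, dense_row_major), imports them as external stream resources,
// and issues three dispatches: dispatch_0 broadcasts and encodes the f16 RHS, dispatch_1
// applies the LHS encoding in place (note the tied result `-> %4{%2}`), and dispatch_2
// performs the batch matmul; the result is then transferred and exported as a
// !hal.buffer_view.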
// -----// IR Dump After IPO (iree-util-ipo) //----- // | |
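// NOTE: with a single public entry point and no internal callees there is nothing for IPO to
// propagate; the module below appears unchanged.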
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "broadwell", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,-avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,+invpcid,+64bit,-xsavec,-avx10.1-512,-avx512vpopcntdq,+cmov,-avx512vp2intersect,-avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,+rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-vpclmulqdq,-avx512vl,-uintr,-cf,-clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,-gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,-avx512f,-amx-bf16,-avx512bf16,-avx512vnni,-push2pop2,+cx8,-avx512bw,+sse3,-pku,-nf,+fsgsbase,-clzero,-mwaitx,-lwp,+lzcnt,-sha,-movdir64b,-ppx,-wbnoinvd,-enqcmd,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,-avx512bitalg,-rdpru,-clwb,+mmx,+sse2,+rdseed,-avx512vbmi2,-prefetchi,-rdpid,-fma4,-avx512vbmi,-shstk,-vaes,-waitpkg,-sgx,+fxsr,-avx512dq,-sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 32 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)> | |
#map1 = affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)> | |
#map2 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)> | |
#map3 = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map4 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
stream.executable private @matmul_broad_dispatch_0 { | |
stream.executable.export public @matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16 workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%5 = linalg.generic {indexing_maps = [#map3, #map4], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%6 = iree_encoding.set_encoding %5 : tensor<?x8640x3200xf16> -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
flow.dispatch.tensor.store %6, %2, offsets = [0, 0, 0], sizes = [%1, 8640, 3200], strides = [1, 1, 1] : tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%1} | |
return | |
} | |
} | |
} | |
stream.executable private @matmul_broad_dispatch_1 { | |
stream.executable.export public @matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200 workgroups(%arg0: index, %arg1: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200(%arg0: !stream.binding, %arg1: index, %arg2: index) { | |
%c0 = arith.constant 0 : index | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} | |
%3 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} -> tensor<?x?x3200xf32> | |
%4 = iree_encoding.set_encoding %3 : tensor<?x?x3200xf32> -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
flow.dispatch.tensor.store %4, %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
stream.executable private @matmul_broad_dispatch_2 { | |
stream.executable.export public @matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32(%arg0: f32, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = flow.dispatch.workload.ordinal %arg3, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg4, 1 : index | |
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0, %1} | |
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0} | |
%4 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%0, %1} | |
%5 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0, %1} -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%6 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0} -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%7 = tensor.empty(%0, %1) : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%8 = linalg.fill ins(%arg0 : f32) outs(%7 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%9 = linalg.batch_matmul_transpose_b ins(%5, %6 : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) outs(%8 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%10 = iree_encoding.unset_encoding %9 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> tensor<?x?x8640xf32> | |
%extracted_slice = tensor.extract_slice %10[0, 0, 0] [%0, %1, 8640] [1, 1, 1] : tensor<?x?x8640xf32> to tensor<?x?x8640xf32> | |
flow.dispatch.tensor.store %extracted_slice, %4, offsets = [0, 0, 0], sizes = [%0, %1, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?x3200xf32>{%0, %1} : index | |
%3 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%2} | |
%4 = stream.async.transfer %3 : !stream.resource<external>{%2} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%2} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%5 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<8640x3200xf16> : index | |
%6 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%5} | |
%7 = stream.async.transfer %6 : !stream.resource<external>{%5} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%5} | |
%8 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>{%0} : index | |
%9 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_0::@matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16[%0](%7[%c0 to %5 for %5], %0) : (!stream.resource<*>{%5}, index) -> !stream.resource<*>{%8} | |
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_1::@matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200[%0, %1](%4[%c0 to %2 for %2], %0, %1) : (!stream.resource<*>{%2}, index, index) -> %4{%2} | |
%11 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?x8640xf32>{%0, %1} : index | |
%12 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_2::@matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32[%0, %1](%cst, %10[%c0 to %2 for %2], %9[%c0 to %8 for %8], %0, %1) : (f32, !stream.resource<*>{%2}, !stream.resource<*>{%8}, index, index) -> !stream.resource<*>{%11} | |
%13 = stream.async.transfer %12 : !stream.resource<*>{%11} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%11} | |
%14 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %13 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%11} -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After CombineInitializers (iree-util-combine-initializers) //----- // | |
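// NOTE: the module contains no util.initializer ops, so combining initializers is a no-op
// and the dump below repeats the previous module verbatim.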
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "broadwell", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,-avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,+invpcid,+64bit,-xsavec,-avx10.1-512,-avx512vpopcntdq,+cmov,-avx512vp2intersect,-avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,+rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-vpclmulqdq,-avx512vl,-uintr,-cf,-clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,-gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,-avx512f,-amx-bf16,-avx512bf16,-avx512vnni,-push2pop2,+cx8,-avx512bw,+sse3,-pku,-nf,+fsgsbase,-clzero,-mwaitx,-lwp,+lzcnt,-sha,-movdir64b,-ppx,-wbnoinvd,-enqcmd,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,-avx512bitalg,-rdpru,-clwb,+mmx,+sse2,+rdseed,-avx512vbmi2,-prefetchi,-rdpid,-fma4,-avx512vbmi,-shstk,-vaes,-waitpkg,-sgx,+fxsr,-avx512dq,-sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 32 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)> | |
#map1 = affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)> | |
#map2 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)> | |
#map3 = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map4 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
stream.executable private @matmul_broad_dispatch_0 { | |
stream.executable.export public @matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16 workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%5 = linalg.generic {indexing_maps = [#map3, #map4], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%6 = iree_encoding.set_encoding %5 : tensor<?x8640x3200xf16> -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
flow.dispatch.tensor.store %6, %2, offsets = [0, 0, 0], sizes = [%1, 8640, 3200], strides = [1, 1, 1] : tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%1} | |
return | |
} | |
} | |
} | |
stream.executable private @matmul_broad_dispatch_1 { | |
stream.executable.export public @matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200 workgroups(%arg0: index, %arg1: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200(%arg0: !stream.binding, %arg1: index, %arg2: index) { | |
%c0 = arith.constant 0 : index | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} | |
%3 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} -> tensor<?x?x3200xf32> | |
%4 = iree_encoding.set_encoding %3 : tensor<?x?x3200xf32> -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
flow.dispatch.tensor.store %4, %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
stream.executable private @matmul_broad_dispatch_2 { | |
stream.executable.export public @matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32(%arg0: f32, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = flow.dispatch.workload.ordinal %arg3, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg4, 1 : index | |
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0, %1} | |
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0} | |
%4 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%0, %1} | |
%5 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0, %1} -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%6 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0} -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%7 = tensor.empty(%0, %1) : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%8 = linalg.fill ins(%arg0 : f32) outs(%7 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%9 = linalg.batch_matmul_transpose_b ins(%5, %6 : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) outs(%8 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%10 = iree_encoding.unset_encoding %9 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> tensor<?x?x8640xf32> | |
%extracted_slice = tensor.extract_slice %10[0, 0, 0] [%0, %1, 8640] [1, 1, 1] : tensor<?x?x8640xf32> to tensor<?x?x8640xf32> | |
flow.dispatch.tensor.store %extracted_slice, %4, offsets = [0, 0, 0], sizes = [%0, %1, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?x3200xf32>{%0, %1} : index | |
%3 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%2} | |
%4 = stream.async.transfer %3 : !stream.resource<external>{%2} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%2} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%5 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<8640x3200xf16> : index | |
%6 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%5} | |
%7 = stream.async.transfer %6 : !stream.resource<external>{%5} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%5} | |
%8 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>{%0} : index | |
%9 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_0::@matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16[%0](%7[%c0 to %5 for %5], %0) : (!stream.resource<*>{%5}, index) -> !stream.resource<*>{%8} | |
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_1::@matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200[%0, %1](%4[%c0 to %2 for %2], %0, %1) : (!stream.resource<*>{%2}, index, index) -> %4{%2} | |
%11 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?x?x8640xf32>{%0, %1} : index | |
%12 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_2::@matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32[%0, %1](%cst, %10[%c0 to %2 for %2], %9[%c0 to %8 for %8], %0, %1) : (f32, !stream.resource<*>{%2}, !stream.resource<*>{%8}, index, index) -> !stream.resource<*>{%11} | |
%13 = stream.async.transfer %12 : !stream.resource<*>{%11} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%11} | |
%14 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %13 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%11} -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After EncodeDeviceTensorsPass (iree-stream-encode-device-tensors) //----- // | |
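// NOTE: from this point the executables are printed standalone, so the #map aliases appear
// inlined as full affine_map attributes; the #iree_encoding annotations are otherwise
// unchanged (turning them into concrete data-tiled layouts is presumably left to later
// codegen passes).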
stream.executable private @matmul_broad_dispatch_0 { | |
stream.executable.export public @matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16 workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%6 = iree_encoding.set_encoding %5 : tensor<?x8640x3200xf16> -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> | |
flow.dispatch.tensor.store %6, %2, offsets = [0, 0, 0], sizes = [%1, 8640, 3200], strides = [1, 1, 1] : tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>{%1} | |
return | |
} | |
} | |
} | |
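// NOTE: dispatch_0 above replicates the static 8640x3200 f16 weight across the dynamic
// batch dimension with a linalg.generic copy, then tags the result with the
// operand_index = 1 matmul encoding before storing it.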
// -----// IR Dump After EncodeDeviceTensorsPass (iree-stream-encode-device-tensors) //----- // | |
stream.executable private @matmul_broad_dispatch_1 { | |
stream.executable.export public @matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200 workgroups(%arg0: index, %arg1: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200(%arg0: !stream.binding, %arg1: index, %arg2: index) { | |
%c0 = arith.constant 0 : index | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} | |
%3 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} -> tensor<?x?x3200xf32> | |
%4 = iree_encoding.set_encoding %3 : tensor<?x?x3200xf32> -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> | |
flow.dispatch.tensor.store %4, %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
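// NOTE: in dispatch_1 above the binding is readwrite and the store targets the same tensor
// it was loaded from, i.e. the LHS encoding is applied in place on the caller's resource.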
// -----// IR Dump After EncodeHostTensorsPass (iree-stream-encode-host-tensors) //----- // | |
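// NOTE: EncodeHostTensorsPass folds each stream.tensor.sizeof into concrete byte arithmetic
// in the function below:
//   tensor<8640x3200xf16>               -> 8640 * 3200 * 2 B      = 55296000 (%c55296000)
//   tensor<?x?x3200xf32>{%0, %1}        -> %0 * (3200 * 4 B) * %1 = %0 * 12800 * %1
//   tensor<?x?x8640xf32>{%0, %1}        -> %0 * (8640 * 4 B) * %1 = %0 * 34560 * %1
//   encoded tensor<?x8640x3200xf16>{%0} -> %0 * 55296000 B; with round_dims_to = 32 the
//   static dims 8640 and 3200 are already multiples of 32, so no padding is added.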
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c34560 = arith.constant 34560 : index | |
%c55296000 = arith.constant 55296000 : index | |
%c12800 = arith.constant 12800 : index | |
%c0 = arith.constant 0 : index | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c12800 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%6 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%7 = stream.async.transfer %6 : !stream.resource<external>{%c55296000} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c55296000} | |
%8 = arith.muli %0, %c55296000 : index | |
%9 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_0::@matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16[%0](%7[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<*>{%c55296000}, index) -> !stream.resource<*>{%8} | |
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_1::@matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200[%0, %1](%5[%c0 to %3 for %3], %0, %1) : (!stream.resource<*>{%3}, index, index) -> %5{%3} | |
%11 = arith.muli %0, %c34560 : index | |
%12 = arith.muli %11, %1 : index | |
%13 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_2::@matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32[%0, %1](%cst, %10[%c0 to %3 for %3], %9[%c0 to %8 for %8], %0, %1) : (f32, !stream.resource<*>{%3}, !stream.resource<*>{%8}, index, index) -> !stream.resource<*>{%12} | |
%14 = stream.async.transfer %13 : !stream.resource<*>{%12} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%12} | |
%15 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %14 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%12} -> !hal.buffer_view | |
util.return %15 : !hal.buffer_view | |
} | |
// -----// IR Dump After EncodeDeviceTensorsPass (iree-stream-encode-device-tensors) //----- // | |
stream.executable private @matmul_broad_dispatch_2 { | |
stream.executable.export public @matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32(%arg0: f32, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = flow.dispatch.workload.ordinal %arg3, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg4, 1 : index | |
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>{%0, %1} | |
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>{%0} | |
%4 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%0, %1} | |
%5 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>{%0, %1} -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> | |
%6 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>>{%0} -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> | |
%7 = tensor.empty(%0, %1) : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> | |
%8 = linalg.fill ins(%arg0 : f32) outs(%7 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> | |
%9 = linalg.batch_matmul_transpose_b ins(%5, %6 : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>) outs(%8 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> | |
%10 = iree_encoding.unset_encoding %9 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> -> tensor<?x?x8640xf32> | |
%extracted_slice = tensor.extract_slice %10[0, 0, 0] [%0, %1, 8640] [1, 1, 1] : tensor<?x?x8640xf32> to tensor<?x?x8640xf32> | |
flow.dispatch.tensor.store %extracted_slice, %4, offsets = [0, 0, 0], sizes = [%0, %1, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
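// NOTE: canonicalization leaves the host function untouched; the dump below is identical to
// the EncodeHostTensorsPass output above.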
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c34560 = arith.constant 34560 : index | |
%c55296000 = arith.constant 55296000 : index | |
%c12800 = arith.constant 12800 : index | |
%c0 = arith.constant 0 : index | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c12800 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%6 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%7 = stream.async.transfer %6 : !stream.resource<external>{%c55296000} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c55296000} | |
%8 = arith.muli %0, %c55296000 : index | |
%9 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_0::@matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16[%0](%7[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<*>{%c55296000}, index) -> !stream.resource<*>{%8} | |
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_1::@matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200[%0, %1](%5[%c0 to %3 for %3], %0, %1) : (!stream.resource<*>{%3}, index, index) -> %5{%3} | |
%11 = arith.muli %0, %c34560 : index | |
%12 = arith.muli %11, %1 : index | |
%13 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_2::@matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32[%0, %1](%cst, %10[%c0 to %3 for %3], %9[%c0 to %8 for %8], %0, %1) : (f32, !stream.resource<*>{%3}, !stream.resource<*>{%8}, index, index) -> !stream.resource<*>{%12} | |
%14 = stream.async.transfer %13 : !stream.resource<*>{%12} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%12} | |
%15 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %14 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%12} -> !hal.buffer_view | |
util.return %15 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
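// NOTE: CSE is a no-op here as well; the arith.muli size computations are already shared,
// so the function below is identical to the canonicalized form above.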
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c34560 = arith.constant 34560 : index | |
%c55296000 = arith.constant 55296000 : index | |
%c12800 = arith.constant 12800 : index | |
%c0 = arith.constant 0 : index | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c12800 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%6 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%7 = stream.async.transfer %6 : !stream.resource<external>{%c55296000} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c55296000} | |
%8 = arith.muli %0, %c55296000 : index | |
%9 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_0::@matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16[%0](%7[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<*>{%c55296000}, index) -> !stream.resource<*>{%8} | |
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_1::@matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200[%0, %1](%5[%c0 to %3 for %3], %0, %1) : (!stream.resource<*>{%3}, index, index) -> %5{%3} | |
%11 = arith.muli %0, %c34560 : index | |
%12 = arith.muli %11, %1 : index | |
%13 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_2::@matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32[%0, %1](%cst, %10[%c0 to %3 for %3], %9[%c0 to %8 for %8], %0, %1) : (f32, !stream.resource<*>{%3}, !stream.resource<*>{%8}, index, index) -> !stream.resource<*>{%12} | |
%14 = stream.async.transfer %13 : !stream.resource<*>{%12} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%12} | |
%15 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %14 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%12} -> !hal.buffer_view | |
util.return %15 : !hal.buffer_view | |
} | |
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- // | |
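// NOTE: the function performs no util.global loads or stores (the device is referenced only
// through #hal.device.affinity attributes), so there are no global accesses to reorder; the
// dump below is unchanged.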
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c34560 = arith.constant 34560 : index | |
%c55296000 = arith.constant 55296000 : index | |
%c12800 = arith.constant 12800 : index | |
%c0 = arith.constant 0 : index | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c12800 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%6 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%7 = stream.async.transfer %6 : !stream.resource<external>{%c55296000} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c55296000} | |
%8 = arith.muli %0, %c55296000 : index | |
%9 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_0::@matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16[%0](%7[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<*>{%c55296000}, index) -> !stream.resource<*>{%8} | |
  %10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_1::@matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200[%0, %1](%5[%c0 to %3 for %3], %0, %1) : (!stream.resource<*>{%3}, index, index) -> %5{%3}
  %11 = arith.muli %0, %c34560 : index
  %12 = arith.muli %11, %1 : index
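  // dispatch_2 is the mixed-precision batch matmul (f32 LHS x f16 RHS -> f32) over the
  // encoded operands; %12 = %0 * %1 * 34560 is the plain f32 output size in bytes.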
  %13 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_2::@matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32[%0, %1](%cst, %10[%c0 to %3 for %3], %9[%c0 to %8 for %8], %0, %1) : (f32, !stream.resource<*>{%3}, !stream.resource<*>{%8}, index, index) -> !stream.resource<*>{%12}
  %14 = stream.async.transfer %13 : !stream.resource<*>{%12} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%12}
  %15 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %14 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%12} -> !hal.buffer_view
  util.return %15 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- //
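// ApplyPatterns runs the util dialect's folding and canonicalization patterns over the
// whole module; this dump prints the full module, including the three stream.executables.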
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "broadwell", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,-avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,+invpcid,+64bit,-xsavec,-avx10.1-512,-avx512vpopcntdq,+cmov,-avx512vp2intersect,-avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,+rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-vpclmulqdq,-avx512vl,-uintr,-cf,-clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,-gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,-avx512f,-amx-bf16,-avx512bf16,-avx512vnni,-push2pop2,+cx8,-avx512bw,+sse3,-pku,-nf,+fsgsbase,-clzero,-mwaitx,-lwp,+lzcnt,-sha,-movdir64b,-ppx,-wbnoinvd,-enqcmd,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,-avx512bitalg,-rdpru,-clwb,+mmx,+sse2,+rdseed,-avx512vbmi2,-prefetchi,-rdpid,-fma4,-avx512vbmi,-shstk,-vaes,-waitpkg,-sgx,+fxsr,-avx512dq,-sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 32 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>
#map1 = affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>
#map2 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>
#map3 = affine_map<(d0, d1, d2) -> (d1, d2)>
#map4 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_local
  stream.executable private @matmul_broad_dispatch_0 {
    stream.executable.export public @matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16 workgroups(%arg0: index) -> (index, index, index) {
      %x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
      stream.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) {
        %c0 = arith.constant 0 : index
        %0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>>
        %1 = flow.dispatch.workload.ordinal %arg1, 0 : index
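        // The #iree_encoding.encoding attribute below tags a tensor for later data
        // tiling: operand_index selects the matmul operand (0 = LHS, 1 = RHS, 2 = result),
        // element_types records the (LHS, RHS, accumulator) types, user_indexing_maps the
        // original batch-matmul maps, and round_dims_to = 32 the alignment the dims may be
        // padded to when the layout is materialized on the target.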
        %2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%1}
        %3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16>
        %4 = tensor.empty(%1) : tensor<?x8640x3200xf16>
        %5 = linalg.generic {indexing_maps = [#map3, #map4], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
        ^bb0(%in: f16, %out: f16):
          linalg.yield %in : f16
        } -> tensor<?x8640x3200xf16>
        %6 = iree_encoding.set_encoding %5 : tensor<?x8640x3200xf16> -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>
        flow.dispatch.tensor.store %6, %2, offsets = [0, 0, 0], sizes = [%1, 8640, 3200], strides = [1, 1, 1] : tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%1}
        return
      }
    }
  }
  stream.executable private @matmul_broad_dispatch_1 {
    stream.executable.export public @matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
      %x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
      stream.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200(%arg0: !stream.binding, %arg1: index, %arg2: index) {
        %c0 = arith.constant 0 : index
        %0 = flow.dispatch.workload.ordinal %arg1, 0 : index
        %1 = flow.dispatch.workload.ordinal %arg2, 1 : index
        %2 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1}
        %3 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} -> tensor<?x?x3200xf32>
        %4 = iree_encoding.set_encoding %3 : tensor<?x?x3200xf32> -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>
        flow.dispatch.tensor.store %4, %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1}
        return
      }
    }
  }
  stream.executable private @matmul_broad_dispatch_2 {
    stream.executable.export public @matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
      %x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
      stream.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32(%arg0: f32, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index, %arg4: index, %arg5: !stream.binding) {
        %c0 = arith.constant 0 : index
        %0 = flow.dispatch.workload.ordinal %arg3, 0 : index
        %1 = flow.dispatch.workload.ordinal %arg4, 1 : index
        %2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0, %1}
        %3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0}
        %4 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%0, %1}
        %5 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0, %1} -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>
        %6 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0} -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>
        %7 = tensor.empty(%0, %1) : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>
        %8 = linalg.fill ins(%arg0 : f32) outs(%7 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>
        %9 = linalg.batch_matmul_transpose_b ins(%5, %6 : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) outs(%8 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>
        %10 = iree_encoding.unset_encoding %9 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> tensor<?x?x8640xf32>
        %extracted_slice = tensor.extract_slice %10[0, 0, 0] [%0, %1, 8640] [1, 1, 1] : tensor<?x?x8640xf32> to tensor<?x?x8640xf32>
        flow.dispatch.tensor.store %extracted_slice, %4, offsets = [0, 0, 0], sizes = [%0, %1, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%0, %1}
        return
      }
    }
  }
  util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
    %c34560 = arith.constant 34560 : index
    %c55296000 = arith.constant 55296000 : index
    %c12800 = arith.constant 12800 : index
    %c0 = arith.constant 0 : index
    %c8640 = arith.constant 8640 : index
    %c3200 = arith.constant 3200 : index
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
    %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
    %element_type_f32 = hal.element_type<f32> : i32
    %dense_row_major = hal.encoding_type<dense_row_major> : i32
    hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major)
    %2 = arith.muli %0, %c12800 : index
    %3 = arith.muli %2, %1 : index
    %4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%3}
    %5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3}
    %element_type_f16 = hal.element_type<f16> : i32
    hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
    %6 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000}
    %7 = stream.async.transfer %6 : !stream.resource<external>{%c55296000} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c55296000}
    %8 = arith.muli %0, %c55296000 : index
    %9 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_0::@matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16[%0](%7[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<*>{%c55296000}, index) -> !stream.resource<*>{%8}
    %10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_1::@matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200[%0, %1](%5[%c0 to %3 for %3], %0, %1) : (!stream.resource<*>{%3}, index, index) -> %5{%3}
    %11 = arith.muli %0, %c34560 : index
    %12 = arith.muli %11, %1 : index
    %13 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_2::@matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32[%0, %1](%cst, %10[%c0 to %3 for %3], %9[%c0 to %8 for %8], %0, %1) : (f32, !stream.resource<*>{%3}, !stream.resource<*>{%8}, index, index) -> !stream.resource<*>{%12}
    %14 = stream.async.transfer %13 : !stream.resource<*>{%12} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%12}
    %15 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %14 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%12} -> !hal.buffer_view
    util.return %15 : !hal.buffer_view
  }
}
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- //
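// FoldGlobals inlines chains of immutable util.globals and drops unused ones. Here
// @__device_0 is still referenced by the affinity attributes, so nothing folds: the
// module was byte-for-byte identical to the ApplyPatterns dump above (duplicate omitted).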
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- //
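// FuseGlobals merges util.globals that always hold the same value; with a single global
// there is nothing to fuse, and the module was again identical to the ApplyPatterns dump
// above (duplicate omitted).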
// -----// IR Dump After IPO (iree-util-ipo) //----- //
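// IPO propagates constants and prunes unused arguments/results across internal call
// edges; @matmul_broad is the only function and is public, so there is nothing to
// optimize and the module was once more identical to the ApplyPatterns dump above
// (duplicate omitted).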
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "broadwell", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,-avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,+invpcid,+64bit,-xsavec,-avx10.1-512,-avx512vpopcntdq,+cmov,-avx512vp2intersect,-avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,+rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-vpclmulqdq,-avx512vl,-uintr,-cf,-clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,-gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,-avx512f,-amx-bf16,-avx512bf16,-avx512vnni,-push2pop2,+cx8,-avx512bw,+sse3,-pku,-nf,+fsgsbase,-clzero,-mwaitx,-lwp,+lzcnt,-sha,-movdir64b,-ppx,-wbnoinvd,-enqcmd,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,-avx512bitalg,-rdpru,-clwb,+mmx,+sse2,+rdseed,-avx512vbmi2,-prefetchi,-rdpid,-fma4,-avx512vbmi,-shstk,-vaes,-waitpkg,-sgx,+fxsr,-avx512dq,-sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 32 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)> | |
#map1 = affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)> | |
#map2 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)> | |
#map3 = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map4 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
stream.executable private @matmul_broad_dispatch_0 { | |
stream.executable.export public @matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16 workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%5 = linalg.generic {indexing_maps = [#map3, #map4], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%6 = iree_encoding.set_encoding %5 : tensor<?x8640x3200xf16> -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
flow.dispatch.tensor.store %6, %2, offsets = [0, 0, 0], sizes = [%1, 8640, 3200], strides = [1, 1, 1] : tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%1} | |
return | |
} | |
} | |
} | |
stream.executable private @matmul_broad_dispatch_1 { | |
stream.executable.export public @matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200 workgroups(%arg0: index, %arg1: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200(%arg0: !stream.binding, %arg1: index, %arg2: index) { | |
%c0 = arith.constant 0 : index | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} | |
%3 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} -> tensor<?x?x3200xf32> | |
%4 = iree_encoding.set_encoding %3 : tensor<?x?x3200xf32> -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
flow.dispatch.tensor.store %4, %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
stream.executable private @matmul_broad_dispatch_2 { | |
stream.executable.export public @matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32(%arg0: f32, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = flow.dispatch.workload.ordinal %arg3, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg4, 1 : index | |
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0, %1} | |
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0} | |
%4 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%0, %1} | |
%5 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0, %1} -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%6 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0} -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%7 = tensor.empty(%0, %1) : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%8 = linalg.fill ins(%arg0 : f32) outs(%7 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%9 = linalg.batch_matmul_transpose_b ins(%5, %6 : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) outs(%8 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%10 = iree_encoding.unset_encoding %9 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> tensor<?x?x8640xf32> | |
%extracted_slice = tensor.extract_slice %10[0, 0, 0] [%0, %1, 8640] [1, 1, 1] : tensor<?x?x8640xf32> to tensor<?x?x8640xf32> | |
flow.dispatch.tensor.store %extracted_slice, %4, offsets = [0, 0, 0], sizes = [%0, %1, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c34560 = arith.constant 34560 : index | |
%c55296000 = arith.constant 55296000 : index | |
%c12800 = arith.constant 12800 : index | |
%c0 = arith.constant 0 : index | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c12800 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%6 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%7 = stream.async.transfer %6 : !stream.resource<external>{%c55296000} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c55296000} | |
%8 = arith.muli %0, %c55296000 : index | |
%9 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_0::@matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16[%0](%7[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<*>{%c55296000}, index) -> !stream.resource<*>{%8} | |
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_1::@matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200[%0, %1](%5[%c0 to %3 for %3], %0, %1) : (!stream.resource<*>{%3}, index, index) -> %5{%3} | |
%11 = arith.muli %0, %c34560 : index | |
%12 = arith.muli %11, %1 : index | |
%13 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_2::@matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32[%0, %1](%cst, %10[%c0 to %3 for %3], %9[%c0 to %8 for %8], %0, %1) : (f32, !stream.resource<*>{%3}, !stream.resource<*>{%8}, index, index) -> !stream.resource<*>{%12} | |
%14 = stream.async.transfer %13 : !stream.resource<*>{%12} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%12} | |
%15 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %14 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%12} -> !hal.buffer_view | |
util.return %15 : !hal.buffer_view | |
} | |
} | |
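// -----// Note: byte-size constants in the ABI wrapper above //----- //
The index constants in @matmul_broad are plain byte sizes derived from the tensor shapes: %c12800 is one 3200-element f32 row (3200 * 4 bytes), %c55296000 is the full 8640x3200 f16 weight (8640 * 3200 * 2 bytes), and %c34560 is one 8640-element f32 output row (8640 * 4 bytes). The arith.muli chains then scale these by the dynamic dims %0 and %1 to size the stream resources. A quick sanity check of the arithmetic (a Python sketch for illustration only; the example dims d0, d1 are made up):

    f32, f16 = 4, 2  # bytes per element
    lhs_row   = 3200 * f32         # 12800    -> %c12800
    rhs_total = 8640 * 3200 * f16  # 55296000 -> %c55296000
    out_row   = 8640 * f32         # 34560    -> %c34560
    d0, d1 = 2, 7  # hypothetical dynamic dims
    assert d0 * lhs_row * d1 == d0 * d1 * 3200 * f32  # %3: LHS resource bytes
    assert d0 * rhs_total == d0 * 8640 * 3200 * f16   # %8: broadcast RHS bytes
    assert d0 * out_row * d1 == d0 * d1 * 8640 * f32  # %12: output resource bytes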
// -----// IR Dump After VerifyLoweringToAsyncResourcesPass (iree-stream-verify-lowering-to-async-resources) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "broadwell", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,-avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,+invpcid,+64bit,-xsavec,-avx10.1-512,-avx512vpopcntdq,+cmov,-avx512vp2intersect,-avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,+rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-vpclmulqdq,-avx512vl,-uintr,-cf,-clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,-gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,-avx512f,-amx-bf16,-avx512bf16,-avx512vnni,-push2pop2,+cx8,-avx512bw,+sse3,-pku,-nf,+fsgsbase,-clzero,-mwaitx,-lwp,+lzcnt,-sha,-movdir64b,-ppx,-wbnoinvd,-enqcmd,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,-avx512bitalg,-rdpru,-clwb,+mmx,+sse2,+rdseed,-avx512vbmi2,-prefetchi,-rdpid,-fma4,-avx512vbmi,-shstk,-vaes,-waitpkg,-sgx,+fxsr,-avx512dq,-sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 32 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)> | |
#map1 = affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)> | |
#map2 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)> | |
#map3 = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map4 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
stream.executable private @matmul_broad_dispatch_0 { | |
stream.executable.export public @matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16 workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%5 = linalg.generic {indexing_maps = [#map3, #map4], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%6 = iree_encoding.set_encoding %5 : tensor<?x8640x3200xf16> -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
flow.dispatch.tensor.store %6, %2, offsets = [0, 0, 0], sizes = [%1, 8640, 3200], strides = [1, 1, 1] : tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%1} | |
return | |
} | |
} | |
} | |
stream.executable private @matmul_broad_dispatch_1 { | |
stream.executable.export public @matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200 workgroups(%arg0: index, %arg1: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200(%arg0: !stream.binding, %arg1: index, %arg2: index) { | |
%c0 = arith.constant 0 : index | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} | |
%3 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} -> tensor<?x?x3200xf32> | |
%4 = iree_encoding.set_encoding %3 : tensor<?x?x3200xf32> -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
flow.dispatch.tensor.store %4, %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
stream.executable private @matmul_broad_dispatch_2 { | |
stream.executable.export public @matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32(%arg0: f32, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = flow.dispatch.workload.ordinal %arg3, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg4, 1 : index | |
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0, %1} | |
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0} | |
%4 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%0, %1} | |
%5 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0, %1} -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%6 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0} -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%7 = tensor.empty(%0, %1) : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%8 = linalg.fill ins(%arg0 : f32) outs(%7 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%9 = linalg.batch_matmul_transpose_b ins(%5, %6 : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) outs(%8 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%10 = iree_encoding.unset_encoding %9 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> tensor<?x?x8640xf32> | |
%extracted_slice = tensor.extract_slice %10[0, 0, 0] [%0, %1, 8640] [1, 1, 1] : tensor<?x?x8640xf32> to tensor<?x?x8640xf32> | |
flow.dispatch.tensor.store %extracted_slice, %4, offsets = [0, 0, 0], sizes = [%0, %1, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c34560 = arith.constant 34560 : index | |
%c55296000 = arith.constant 55296000 : index | |
%c12800 = arith.constant 12800 : index | |
%c0 = arith.constant 0 : index | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c12800 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%6 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%7 = stream.async.transfer %6 : !stream.resource<external>{%c55296000} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c55296000} | |
%8 = arith.muli %0, %c55296000 : index | |
%9 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_0::@matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16[%0](%7[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<*>{%c55296000}, index) -> !stream.resource<*>{%8} | |
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_1::@matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200[%0, %1](%5[%c0 to %3 for %3], %0, %1) : (!stream.resource<*>{%3}, index, index) -> %5{%3} | |
%11 = arith.muli %0, %c34560 : index | |
%12 = arith.muli %11, %1 : index | |
%13 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_2::@matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32[%0, %1](%cst, %10[%c0 to %3 for %3], %9[%c0 to %8 for %8], %0, %1) : (f32, !stream.resource<*>{%3}, !stream.resource<*>{%8}, index, index) -> !stream.resource<*>{%12} | |
%14 = stream.async.transfer %13 : !stream.resource<*>{%12} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%12} | |
%15 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %14 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%12} -> !hal.buffer_view | |
util.return %15 : !hal.buffer_view | |
} | |
} | |
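// -----// Note: overall structure of the three dispatches //----- //
At this stage the program is split into three dispatches: dispatch_0 broadcasts the 8640x3200 f16 weight across the dynamic batch and applies the RHS matmul encoding, dispatch_1 applies the LHS encoding in place on the imported f32 input, and dispatch_2 runs the f32 x f16 -> f32 batch matmul with B transposed. The #iree_encoding.encoding attributes (round_dims_to = 32) let later materialization pad each operand dimension up to a multiple of 32 for data tiling. Ignoring the encodings, the reference semantics are a broadcasted transposed matmul; a NumPy sketch (illustrative only, with made-up dynamic dims):

    import numpy as np

    def matmul_broad_reference(a, b):
        # a: (d0, d1, 3200) f32, b: (8640, 3200) f16 -> (d0, d1, 8640) f32
        b_broad = np.broadcast_to(b, (a.shape[0],) + b.shape)  # dispatch_0
        # dispatch_2: batch matmul with B transposed, accumulating in f32.
        return np.einsum("bmk,bnk->bmn", a, b_broad.astype(np.float32))

    a = np.random.rand(2, 7, 3200).astype(np.float32)
    b = np.random.rand(8640, 3200).astype(np.float16)
    assert matmul_broad_reference(a, b).shape == (2, 7, 8640)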
// -----// IR Dump After MaterializeCopyOnWritePass (iree-stream-materialize-copy-on-write) //----- // | |
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c34560 = arith.constant 34560 : index | |
%c55296000 = arith.constant 55296000 : index | |
%c12800 = arith.constant 12800 : index | |
%c0 = arith.constant 0 : index | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c12800 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%6 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%7 = stream.async.transfer %6 : !stream.resource<external>{%c55296000} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c55296000} | |
%8 = arith.muli %0, %c55296000 : index | |
%9 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_0::@matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16[%0](%7[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<*>{%c55296000}, index) -> !stream.resource<*>{%8} | |
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_1::@matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200[%0, %1](%5[%c0 to %3 for %3], %0, %1) : (!stream.resource<*>{%3}, index, index) -> %5{%3} | |
%11 = arith.muli %0, %c34560 : index | |
%12 = arith.muli %11, %1 : index | |
%13 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_2::@matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32[%0, %1](%cst, %10[%c0 to %3 for %3], %9[%c0 to %8 for %8], %0, %1) : (f32, !stream.resource<*>{%3}, !stream.resource<*>{%8}, index, index) -> !stream.resource<*>{%12} | |
%14 = stream.async.transfer %13 : !stream.resource<*>{%12} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%12} | |
%15 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %14 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%12} -> !hal.buffer_view | |
util.return %15 : !hal.buffer_view | |
} | |
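// -----// Note: why MaterializeCopyOnWritePass changes nothing here //----- //
dispatch_1 is tied to its input (the -> %5{%3} result), i.e. the LHS set_encoding runs in place on the imported resource. Copy-on-write only has to materialize a clone when a tied operand has other uses that would observe the mutation; %5 feeds nothing but dispatch_1, so no clone is inserted and the function comes out unchanged. A rough sketch of that rule (an approximation for illustration, not the actual pass logic):

    # Clone a tied operand only if some other use could still observe
    # the value after the in-place dispatch mutates it.
    def needs_clone(operand_uses, mutating_op):
        return any(use is not mutating_op for use in operand_uses)

    dispatch_1 = object()
    assert needs_clone([dispatch_1], dispatch_1) is False  # %5: single use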
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c34560 = arith.constant 34560 : index | |
%c55296000 = arith.constant 55296000 : index | |
%c12800 = arith.constant 12800 : index | |
%c0 = arith.constant 0 : index | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c12800 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%6 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%7 = stream.async.transfer %6 : !stream.resource<external>{%c55296000} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c55296000} | |
%8 = arith.muli %0, %c55296000 : index | |
%9 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_0::@matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16[%0](%7[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<*>{%c55296000}, index) -> !stream.resource<*>{%8} | |
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_1::@matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200[%0, %1](%5[%c0 to %3 for %3], %0, %1) : (!stream.resource<*>{%3}, index, index) -> %5{%3} | |
%11 = arith.muli %0, %c34560 : index | |
%12 = arith.muli %11, %1 : index | |
%13 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_2::@matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32[%0, %1](%cst, %10[%c0 to %3 for %3], %9[%c0 to %8 for %8], %0, %1) : (f32, !stream.resource<*>{%3}, !stream.resource<*>{%8}, index, index) -> !stream.resource<*>{%12} | |
%14 = stream.async.transfer %13 : !stream.resource<*>{%12} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%12} | |
%15 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %14 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%12} -> !hal.buffer_view | |
util.return %15 : !hal.buffer_view | |
} | |
// -----// IR Dump After ElideAsyncCopiesPass (iree-stream-elide-async-copies) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "broadwell", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,-avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,+invpcid,+64bit,-xsavec,-avx10.1-512,-avx512vpopcntdq,+cmov,-avx512vp2intersect,-avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,+rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-vpclmulqdq,-avx512vl,-uintr,-cf,-clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,-gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,-avx512f,-amx-bf16,-avx512bf16,-avx512vnni,-push2pop2,+cx8,-avx512bw,+sse3,-pku,-nf,+fsgsbase,-clzero,-mwaitx,-lwp,+lzcnt,-sha,-movdir64b,-ppx,-wbnoinvd,-enqcmd,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,-avx512bitalg,-rdpru,-clwb,+mmx,+sse2,+rdseed,-avx512vbmi2,-prefetchi,-rdpid,-fma4,-avx512vbmi,-shstk,-vaes,-waitpkg,-sgx,+fxsr,-avx512dq,-sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 32 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)> | |
#map1 = affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)> | |
#map2 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)> | |
#map3 = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map4 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
stream.executable private @matmul_broad_dispatch_0 { | |
stream.executable.export public @matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16 workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%5 = linalg.generic {indexing_maps = [#map3, #map4], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%6 = iree_encoding.set_encoding %5 : tensor<?x8640x3200xf16> -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
flow.dispatch.tensor.store %6, %2, offsets = [0, 0, 0], sizes = [%1, 8640, 3200], strides = [1, 1, 1] : tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%1} | |
return | |
} | |
} | |
} | |
stream.executable private @matmul_broad_dispatch_1 { | |
stream.executable.export public @matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200 workgroups(%arg0: index, %arg1: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200(%arg0: !stream.binding, %arg1: index, %arg2: index) { | |
%c0 = arith.constant 0 : index | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} | |
%3 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} -> tensor<?x?x3200xf32> | |
%4 = iree_encoding.set_encoding %3 : tensor<?x?x3200xf32> -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
flow.dispatch.tensor.store %4, %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
stream.executable private @matmul_broad_dispatch_2 { | |
stream.executable.export public @matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32(%arg0: f32, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = flow.dispatch.workload.ordinal %arg3, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg4, 1 : index | |
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0, %1} | |
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0} | |
%4 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%0, %1} | |
%5 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0, %1} -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%6 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0} -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%7 = tensor.empty(%0, %1) : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%8 = linalg.fill ins(%arg0 : f32) outs(%7 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%9 = linalg.batch_matmul_transpose_b ins(%5, %6 : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) outs(%8 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%10 = iree_encoding.unset_encoding %9 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> tensor<?x?x8640xf32> | |
%extracted_slice = tensor.extract_slice %10[0, 0, 0] [%0, %1, 8640] [1, 1, 1] : tensor<?x?x8640xf32> to tensor<?x?x8640xf32> | |
flow.dispatch.tensor.store %extracted_slice, %4, offsets = [0, 0, 0], sizes = [%0, %1, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c34560 = arith.constant 34560 : index | |
%c55296000 = arith.constant 55296000 : index | |
%c12800 = arith.constant 12800 : index | |
%c0 = arith.constant 0 : index | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c12800 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%6 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%7 = stream.async.transfer %6 : !stream.resource<external>{%c55296000} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c55296000} | |
%8 = arith.muli %0, %c55296000 : index | |
%9 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_0::@matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16[%0](%7[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<*>{%c55296000}, index) -> !stream.resource<*>{%8} | |
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_1::@matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200[%0, %1](%5[%c0 to %3 for %3], %0, %1) : (!stream.resource<*>{%3}, index, index) -> %5{%3} | |
%11 = arith.muli %0, %c34560 : index | |
%12 = arith.muli %11, %1 : index | |
%13 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_2::@matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32[%0, %1](%cst, %10[%c0 to %3 for %3], %9[%c0 to %8 for %8], %0, %1) : (f32, !stream.resource<*>{%3}, !stream.resource<*>{%8}, index, index) -> !stream.resource<*>{%12} | |
%14 = stream.async.transfer %13 : !stream.resource<*>{%12} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%12} | |
%15 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %14 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%12} -> !hal.buffer_view | |
util.return %15 : !hal.buffer_view | |
} | |
} | |
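// -----// Note: the surviving stream.async.transfer ops //----- //
ElideAsyncCopiesPass also leaves this function unchanged: the two stream.async.transfer ops (%5 and %7) have identical source and target affinities and only cast the resource lifetime from external to the wildcard *, so there is no redundant copy to elide here. They fall away once RefineUsagePass below assigns concrete lifetimes. A hypothetical helper showing the shape of that observation (illustration only, not IREE code):

    # A transfer that moves nothing between devices and only changes the
    # lifetime annotation is a placeholder cast, not a real copy.
    def is_lifetime_cast(src_affinity, dst_affinity, src_life, dst_life):
        return src_affinity == dst_affinity and src_life != dst_life

    assert is_lifetime_cast("@__device_0", "@__device_0", "external", "*")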
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c34560 = arith.constant 34560 : index | |
%c55296000 = arith.constant 55296000 : index | |
%c12800 = arith.constant 12800 : index | |
%c0 = arith.constant 0 : index | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c12800 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%6 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%7 = stream.async.transfer %6 : !stream.resource<external>{%c55296000} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c55296000} | |
%8 = arith.muli %0, %c55296000 : index | |
%9 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_0::@matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16[%0](%7[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<*>{%c55296000}, index) -> !stream.resource<*>{%8} | |
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_1::@matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200[%0, %1](%5[%c0 to %3 for %3], %0, %1) : (!stream.resource<*>{%3}, index, index) -> %5{%3} | |
%11 = arith.muli %0, %c34560 : index | |
%12 = arith.muli %11, %1 : index | |
%13 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_2::@matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32[%0, %1](%cst, %10[%c0 to %3 for %3], %9[%c0 to %8 for %8], %0, %1) : (f32, !stream.resource<*>{%3}, !stream.resource<*>{%8}, index, index) -> !stream.resource<*>{%12} | |
%14 = stream.async.transfer %13 : !stream.resource<*>{%12} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%12} | |
%15 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %14 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%12} -> !hal.buffer_view | |
util.return %15 : !hal.buffer_view | |
} | |
// -----// IR Dump After EmplaceAllocationsPass (iree-stream-emplace-allocations) //----- // | |
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c34560 = arith.constant 34560 : index | |
%c55296000 = arith.constant 55296000 : index | |
%c12800 = arith.constant 12800 : index | |
%c0 = arith.constant 0 : index | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c12800 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%6 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%7 = stream.async.transfer %6 : !stream.resource<external>{%c55296000} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c55296000} | |
%8 = arith.muli %0, %c55296000 : index | |
%9 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_0::@matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16[%0](%7[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<*>{%c55296000}, index) -> !stream.resource<*>{%8} | |
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_1::@matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200[%0, %1](%5[%c0 to %3 for %3], %0, %1) : (!stream.resource<*>{%3}, index, index) -> %5{%3} | |
%11 = arith.muli %0, %c34560 : index | |
%12 = arith.muli %11, %1 : index | |
%13 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_2::@matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32[%0, %1](%cst, %10[%c0 to %3 for %3], %9[%c0 to %8 for %8], %0, %1) : (f32, !stream.resource<*>{%3}, !stream.resource<*>{%8}, index, index) -> !stream.resource<*>{%12} | |
%14 = stream.async.transfer %13 : !stream.resource<*>{%12} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%12} | |
%15 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %14 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%12} -> !hal.buffer_view | |
util.return %15 : !hal.buffer_view | |
} | |
// -----// IR Dump After RefineUsagePass (iree-stream-refine-usage) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "broadwell", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,-avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,+invpcid,+64bit,-xsavec,-avx10.1-512,-avx512vpopcntdq,+cmov,-avx512vp2intersect,-avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,+rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-vpclmulqdq,-avx512vl,-uintr,-cf,-clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,-gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,-avx512f,-amx-bf16,-avx512bf16,-avx512vnni,-push2pop2,+cx8,-avx512bw,+sse3,-pku,-nf,+fsgsbase,-clzero,-mwaitx,-lwp,+lzcnt,-sha,-movdir64b,-ppx,-wbnoinvd,-enqcmd,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,-avx512bitalg,-rdpru,-clwb,+mmx,+sse2,+rdseed,-avx512vbmi2,-prefetchi,-rdpid,-fma4,-avx512vbmi,-shstk,-vaes,-waitpkg,-sgx,+fxsr,-avx512dq,-sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 32 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)> | |
#map1 = affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)> | |
#map2 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)> | |
#map3 = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map4 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
stream.executable private @matmul_broad_dispatch_0 { | |
stream.executable.export public @matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16 workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%5 = linalg.generic {indexing_maps = [#map3, #map4], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%6 = iree_encoding.set_encoding %5 : tensor<?x8640x3200xf16> -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
flow.dispatch.tensor.store %6, %2, offsets = [0, 0, 0], sizes = [%1, 8640, 3200], strides = [1, 1, 1] : tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%1} | |
return | |
} | |
} | |
} | |
stream.executable private @matmul_broad_dispatch_1 { | |
stream.executable.export public @matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200 workgroups(%arg0: index, %arg1: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200(%arg0: !stream.binding, %arg1: index, %arg2: index) { | |
%c0 = arith.constant 0 : index | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} | |
%3 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} -> tensor<?x?x3200xf32> | |
%4 = iree_encoding.set_encoding %3 : tensor<?x?x3200xf32> -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
flow.dispatch.tensor.store %4, %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
stream.executable private @matmul_broad_dispatch_2 { | |
stream.executable.export public @matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32(%arg0: f32, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = flow.dispatch.workload.ordinal %arg3, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg4, 1 : index | |
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0, %1} | |
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0} | |
%4 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%0, %1} | |
%5 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0, %1} -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%6 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0} -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%7 = tensor.empty(%0, %1) : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%8 = linalg.fill ins(%arg0 : f32) outs(%7 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%9 = linalg.batch_matmul_transpose_b ins(%5, %6 : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) outs(%8 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%10 = iree_encoding.unset_encoding %9 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> tensor<?x?x8640xf32> | |
%extracted_slice = tensor.extract_slice %10[0, 0, 0] [%0, %1, 8640] [1, 1, 1] : tensor<?x?x8640xf32> to tensor<?x?x8640xf32> | |
flow.dispatch.tensor.store %extracted_slice, %4, offsets = [0, 0, 0], sizes = [%0, %1, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
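// annotation: the public entry point below is the host-side ABI wrapper. It
// asserts the shapes and element types of the incoming !hal.buffer_view
// arguments, imports them as stream resources, launches the three dispatches,
// and exports the result back to a !hal.buffer_view.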
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c34560 = arith.constant 34560 : index | |
%c55296000 = arith.constant 55296000 : index | |
%c12800 = arith.constant 12800 : index | |
%c0 = arith.constant 0 : index | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
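    // annotation: the byte-size constants above derive from the tensor shapes:
    //   %c12800    = 3200 (K) * 4 B per f32, one LHS row, so %3 = %0 * %1 * 12800 B
    //   %c55296000 = 8640 * 3200 * 2 B per f16, the entire RHS weight matrix
    //   %c34560    = 8640 (N) * 4 B per f32, one output row, so %10 = %0 * %1 * 34560 B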
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c12800 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%3} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%5 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%6 = arith.muli %0, %c55296000 : index | |
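    // annotation: dispatch_0 broadcasts the static 8640x3200 f16 RHS across the
    // dynamic batch dimension %0 and applies the matmul data-tiling encoding;
    // its result lives in a transient resource of %0 * 55296000 bytes (%6).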
%7 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_0::@matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16[%0](%5[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<external>{%c55296000}, index) -> !stream.resource<transient>{%6} | |
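    // annotation: dispatch_1 re-encodes the f32 LHS for the matmul. The tied
    // result "-> %4{%3}" indicates the dispatch updates the imported LHS
    // resource in place rather than allocating a new one.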
%8 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_1::@matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200[%0, %1](%4[%c0 to %3 for %3], %0, %1) : (!stream.resource<external>{%3}, index, index) -> %4{%3} | |
%9 = arith.muli %0, %c34560 : index | |
%10 = arith.muli %9, %1 : index | |
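    // annotation: dispatch_2 is the mixed-precision batch matmul itself
    // (f32 x f16 -> f32, RHS transposed). The fill value %cst is passed in as a
    // plain f32 operand, and the result is a fresh external resource of %10 bytes.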
%11 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_2::@matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32[%0, %1](%cst, %8[%c0 to %3 for %3], %7[%c0 to %6 for %6], %0, %1) : (f32, !stream.resource<external>{%3}, !stream.resource<transient>{%6}, index, index) -> !stream.resource<external>{%10} | |
%12 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %11 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%10} -> !hal.buffer_view | |
util.return %12 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c34560 = arith.constant 34560 : index | |
%c55296000 = arith.constant 55296000 : index | |
%c12800 = arith.constant 12800 : index | |
%c0 = arith.constant 0 : index | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c12800 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%3} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%5 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%6 = arith.muli %0, %c55296000 : index | |
%7 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_0::@matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16[%0](%5[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<external>{%c55296000}, index) -> !stream.resource<transient>{%6} | |
%8 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_1::@matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200[%0, %1](%4[%c0 to %3 for %3], %0, %1) : (!stream.resource<external>{%3}, index, index) -> %4{%3} | |
%9 = arith.muli %0, %c34560 : index | |
%10 = arith.muli %9, %1 : index | |
%11 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_2::@matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32[%0, %1](%cst, %8[%c0 to %3 for %3], %7[%c0 to %6 for %6], %0, %1) : (f32, !stream.resource<external>{%3}, !stream.resource<transient>{%6}, index, index) -> !stream.resource<external>{%10} | |
%12 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %11 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%10} -> !hal.buffer_view | |
util.return %12 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
// (function body identical to the Canonicalizer dump above; CSE changed nothing, duplicate elided)
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- // | |
// (function body identical to the Canonicalizer dump above; SimplifyGlobalAccesses changed nothing, duplicate elided)
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- // | |
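// annotation: from this dump on, the full module is printed, including the
// outlined stream.executables and the llvm-cpu target configuration
// (cpu = "broadwell", AVX2/FMA enabled, native_vector_size = 32).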
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "broadwell", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,-avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,+invpcid,+64bit,-xsavec,-avx10.1-512,-avx512vpopcntdq,+cmov,-avx512vp2intersect,-avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,+rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-vpclmulqdq,-avx512vl,-uintr,-cf,-clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,-gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,-avx512f,-amx-bf16,-avx512bf16,-avx512vnni,-push2pop2,+cx8,-avx512bw,+sse3,-pku,-nf,+fsgsbase,-clzero,-mwaitx,-lwp,+lzcnt,-sha,-movdir64b,-ppx,-wbnoinvd,-enqcmd,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,-avx512bitalg,-rdpru,-clwb,+mmx,+sse2,+rdseed,-avx512vbmi2,-prefetchi,-rdpid,-fma4,-avx512vbmi,-shstk,-vaes,-waitpkg,-sgx,+fxsr,-avx512dq,-sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 32 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)> | |
#map1 = affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)> | |
#map2 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)> | |
#map3 = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map4 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
stream.executable private @matmul_broad_dispatch_0 { | |
stream.executable.export public @matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16 workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
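    // annotation: the workgroup count above is left symbolic
    // (flow.dispatch.workgroup_count_from_slice); it is resolved later in
    // codegen once tile sizes for this executable have been chosen.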
builtin.module { | |
func.func @matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
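        // annotation: flow.dispatch.workload.ordinal ties %arg1 to workload
        // operand 0 of the dispatch site (the dynamic batch size), so shape
        // reasoning inside the executable keys off the same value the
        // workgroup count is computed from.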
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%5 = linalg.generic {indexing_maps = [#map3, #map4], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
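        // annotation: set_encoding attaches the data-tiling encoding
        // (operand_index = 1 marks this tensor as the matmul RHS;
        // round_dims_to = 32 permits padding each dim up to a multiple of 32).
        // The concrete packed layout is chosen later, when the encoding is
        // materialized for the CPU target.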
%6 = iree_encoding.set_encoding %5 : tensor<?x8640x3200xf16> -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
flow.dispatch.tensor.store %6, %2, offsets = [0, 0, 0], sizes = [%1, 8640, 3200], strides = [1, 1, 1] : tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%1} | |
return | |
} | |
} | |
} | |
stream.executable private @matmul_broad_dispatch_1 { | |
stream.executable.export public @matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200 workgroups(%arg0: index, %arg1: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200(%arg0: !stream.binding, %arg1: index, %arg2: index) { | |
%c0 = arith.constant 0 : index | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
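        // annotation: the single binding below is readwrite: the encoded LHS is
        // written back over the buffer it was loaded from, matching the
        // in-place update seen at the stream.async.dispatch call site.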
%2 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} | |
%3 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} -> tensor<?x?x3200xf32> | |
%4 = iree_encoding.set_encoding %3 : tensor<?x?x3200xf32> -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
flow.dispatch.tensor.store %4, %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
stream.executable private @matmul_broad_dispatch_2 { | |
stream.executable.export public @matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32(%arg0: f32, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = flow.dispatch.workload.ordinal %arg3, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg4, 1 : index | |
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0, %1} | |
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0} | |
%4 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%0, %1} | |
%5 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0, %1} -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%6 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%0} -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%7 = tensor.empty(%0, %1) : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%8 = linalg.fill ins(%arg0 : f32) outs(%7 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
%9 = linalg.batch_matmul_transpose_b ins(%5, %6 : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>, tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) outs(%8 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> | |
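        // annotation: unset_encoding drops back to a plain row-major tensor and
        // the extract_slice trims it to the logical sizes [%0, %1, 8640],
        // discarding any padding the encoded layout may have introduced.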
%10 = iree_encoding.unset_encoding %9 : tensor<?x?x8640xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> tensor<?x?x8640xf32> | |
%extracted_slice = tensor.extract_slice %10[0, 0, 0] [%0, %1, 8640] [1, 1, 1] : tensor<?x?x8640xf32> to tensor<?x?x8640xf32> | |
flow.dispatch.tensor.store %extracted_slice, %4, offsets = [0, 0, 0], sizes = [%0, %1, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
util.func public @matmul_broad(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_broad(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c34560 = arith.constant 34560 : index | |
%c55296000 = arith.constant 55296000 : index | |
%c12800 = arith.constant 12800 : index | |
%c0 = arith.constant 0 : index | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c12800 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%3} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%5 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%6 = arith.muli %0, %c55296000 : index | |
%7 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_0::@matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16[%0](%5[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<external>{%c55296000}, index) -> !stream.resource<transient>{%6} | |
%8 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_1::@matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200[%0, %1](%4[%c0 to %3 for %3], %0, %1) : (!stream.resource<external>{%3}, index, index) -> %4{%3} | |
%9 = arith.muli %0, %c34560 : index | |
%10 = arith.muli %9, %1 : index | |
%11 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_broad_dispatch_2::@matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32[%0, %1](%cst, %8[%c0 to %3 for %3], %7[%c0 to %6 for %6], %0, %1) : (f32, !stream.resource<external>{%3}, !stream.resource<transient>{%6}, index, index) -> !stream.resource<external>{%10} | |
%12 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %11 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%10} -> !hal.buffer_view | |
util.return %12 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- // | |
// (module identical to the ApplyPatterns dump above; FoldGlobals changed nothing, duplicate elided)
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- // | |
// (module identical to the ApplyPatterns dump above; FuseGlobals changed nothing, duplicate elided)
// -----// IR Dump After IPO (iree-util-ipo) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "broadwell", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,-avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,+invpcid,+64bit,-xsavec,-avx10.1-512,-avx512vpopcntdq,+cmov,-avx512vp2intersect,-avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,+rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-vpclmulqdq,-avx512vl,-uintr,-cf,-clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,-gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,-avx512f,-amx-bf16,-avx512bf16,-avx512vnni,-push2pop2,+cx8,-avx512bw,+sse3,-pku,-nf,+fsgsbase,-clzero,-mwaitx,-lwp,+lzcnt,-sha,-movdir64b,-ppx,-wbnoinvd,-enqcmd,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,-avx512bitalg,-rdpru,-clwb,+mmx,+sse2,+rdseed,-avx512vbmi2,-prefetchi,-rdpid,-fma4,-avx512vbmi,-shstk,-vaes,-waitpkg,-sgx,+fxsr,-avx512dq,-sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 32 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>
#map1 = affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>
#map2 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>
#map3 = affine_map<(d0, d1, d2) -> (d1, d2)>
#map4 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_local
  stream.executable private @matmul_broad_dispatch_0 {
    stream.executable.export public @matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16 workgroups(%arg0: index) -> (index, index, index) {
      %x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
      stream.return %x, %y, %z : index, index, index
    }
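    // Note: flow.dispatch.workgroup_count_from_slice is a placeholder; the
    // concrete X/Y/Z workgroup count is resolved later in codegen from the
    // workload values once tile sizes are chosen.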
    builtin.module {
      func.func @matmul_broad_dispatch_0_elementwise_broadcast_Dx8640x3200_f16(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) {
        %c0 = arith.constant 0 : index
        %0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>>
        %1 = flow.dispatch.workload.ordinal %arg1, 0 : index
        %2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%1}
        %3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16>
        %4 = tensor.empty(%1) : tensor<?x8640x3200xf16>
        %5 = linalg.generic {indexing_maps = [#map3, #map4], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
        ^bb0(%in: f16, %out: f16):
          linalg.yield %in : f16
        } -> tensor<?x8640x3200xf16>
        %6 = iree_encoding.set_encoding %5 : tensor<?x8640x3200xf16> -> tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>
        flow.dispatch.tensor.store %6, %2, offsets = [0, 0, 0], sizes = [%1, 8640, 3200], strides = [1, 1, 1] : tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<writeonly:tensor<?x8640x3200xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>>{%1}
        return
      }
    }
  }
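  // dispatch_0 fuses the f16 weight broadcast with iree_encoding.set_encoding
  // for the RHS (operand_index = 1); round_dims_to = <32, 32, 32> indicates
  // the materialized layout may pad each dimension up to a multiple of 32.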
  stream.executable private @matmul_broad_dispatch_1 {
    stream.executable.export public @matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
      %x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
      stream.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @matmul_broad_dispatch_1_set_encoding_LHS_DxDx3200(%arg0: !stream.binding, %arg1: index, %arg2: index) {
        %c0 = arith.constant 0 : index
        %0 = flow.dispatch.workload.ordinal %arg1, 0 : index
        %1 = flow.dispatch.workload.ordinal %arg2, 1 : index
        %2 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1}
        %3 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1} -> tensor<?x?x3200xf32>
        %4 = iree_encoding.set_encoding %3 : tensor<?x?x3200xf32> -> tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>
        flow.dispatch.tensor.store %4, %2, offsets = [0, 0, 0], sizes = [%0, %1, 3200], strides = [1, 1, 1] : tensor<?x?x3200xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> !flow.dispatch.tensor<readwrite:tensor<?x?x3200xf32>>{%0, %1}
        return
      }
    }
  }
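  // dispatch_1 re-encodes the dynamic LHS in place: the binding is readwrite
  // and the encoded tensor is stored back over the imported resource, which
  // is why the stream.async.dispatch above returns %4{%3} rather than
  // allocating a fresh result.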
  stream.executable private @matmul_broad_dispatch_2 {
    stream.executable.export public @matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
      %x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
      stream.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @matmul_broad_dispatch_2_batch_matmul_transpose_b_DxDx8640x3200_f32xf16xf32