// -----// IR Dump After AutoInputConversionPipelinePass (iree-auto-input-conversion) //----- //
#map = affine_map<(d0, d1, d2) -> (d0, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
module {
  func.func @matmul(%arg0: tensor<2x2816xf16>, %arg1: tensor<2816x1280xf16>) -> tensor<2x1280xf32> {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = tensor.empty() : tensor<2x1280xf32>
    %1 = linalg.fill ins(%cst : f32) outs(%0 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
    %2 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%arg0, %arg1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%1 : tensor<2x1280xf32>) {
    ^bb0(%in: f16, %in_0: f16, %out: f32):
      %3 = arith.extf %in : f16 to f32
      %4 = arith.extf %in_0 : f16 to f32
      %5 = arith.mulf %3, %4 : f32
      %6 = arith.addf %out, %5 : f32
      linalg.yield %6 : f32
    } -> tensor<2x1280xf32>
    return %2 : tensor<2x1280xf32>
  }
}
// -----// IR Dump After IREEImportPublicPass (iree-import-public) //----- //
#map = affine_map<(d0, d1, d2) -> (d0, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
module {
  util.func public @matmul(%arg0: tensor<2x2816xf16>, %arg1: tensor<2816x1280xf16>) -> tensor<2x1280xf32> {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = tensor.empty() : tensor<2x1280xf32>
    %1 = linalg.fill ins(%cst : f32) outs(%0 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
    %2 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%arg0, %arg1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%1 : tensor<2x1280xf32>) {
    ^bb0(%in: f16, %in_0: f16, %out: f32):
      %3 = arith.extf %in : f16 to f32
      %4 = arith.extf %in_0 : f16 to f32
      %5 = arith.mulf %3, %4 : f32
      %6 = arith.addf %out, %5 : f32
      linalg.yield %6 : f32
    } -> tensor<2x1280xf32>
    util.return %2 : tensor<2x1280xf32>
  }
}
// -----// IR Dump After ImportMLProgramPass (iree-import-ml-program) //----- //
#map = affine_map<(d0, d1, d2) -> (d0, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
module {
  util.func public @matmul(%arg0: tensor<2x2816xf16>, %arg1: tensor<2816x1280xf16>) -> tensor<2x1280xf32> {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = tensor.empty() : tensor<2x1280xf32>
    %1 = linalg.fill ins(%cst : f32) outs(%0 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
    %2 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%arg0, %arg1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%1 : tensor<2x1280xf32>) {
    ^bb0(%in: f16, %in_0: f16, %out: f32):
      %3 = arith.extf %in : f16 to f32
      %4 = arith.extf %in_0 : f16 to f32
      %5 = arith.mulf %3, %4 : f32
      %6 = arith.addf %out, %5 : f32
      linalg.yield %6 : f32
    } -> tensor<2x1280xf32>
    util.return %2 : tensor<2x1280xf32>
  }
}
// -----// IR Dump After SanitizeModuleNamesPass (iree-sanitize-module-names) //----- //
#map = affine_map<(d0, d1, d2) -> (d0, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
module {
  util.func public @matmul(%arg0: tensor<2x2816xf16>, %arg1: tensor<2816x1280xf16>) -> tensor<2x1280xf32> {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = tensor.empty() : tensor<2x1280xf32>
    %1 = linalg.fill ins(%cst : f32) outs(%0 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
    %2 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%arg0, %arg1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%1 : tensor<2x1280xf32>) {
    ^bb0(%in: f16, %in_0: f16, %out: f32):
      %3 = arith.extf %in : f16 to f32
      %4 = arith.extf %in_0 : f16 to f32
      %5 = arith.mulf %3, %4 : f32
      %6 = arith.addf %out, %5 : f32
      linalg.yield %6 : f32
    } -> tensor<2x1280xf32>
    util.return %2 : tensor<2x1280xf32>
  }
}
// -----// IR Dump After ConvertMeshToFlowPass (iree-convert-mesh-to-flow) //----- //
#map = affine_map<(d0, d1, d2) -> (d0, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
module {
  util.func public @matmul(%arg0: tensor<2x2816xf16>, %arg1: tensor<2816x1280xf16>) -> tensor<2x1280xf32> {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = tensor.empty() : tensor<2x1280xf32>
    %1 = linalg.fill ins(%cst : f32) outs(%0 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
    %2 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%arg0, %arg1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%1 : tensor<2x1280xf32>) {
    ^bb0(%in: f16, %in_0: f16, %out: f32):
      %3 = arith.extf %in : f16 to f32
      %4 = arith.extf %in_0 : f16 to f32
      %5 = arith.mulf %3, %4 : f32
      %6 = arith.addf %out, %5 : f32
      linalg.yield %6 : f32
    } -> tensor<2x1280xf32>
    util.return %2 : tensor<2x1280xf32>
  }
}
// -----// IR Dump After DemoteF64ToF32Pass (iree-input-conversion-demote-f64-to-f32) //----- //
#map = affine_map<(d0, d1, d2) -> (d0, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
module {
  util.func public @matmul(%arg0: tensor<2x2816xf16>, %arg1: tensor<2816x1280xf16>) -> tensor<2x1280xf32> {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = tensor.empty() : tensor<2x1280xf32>
    %1 = linalg.fill ins(%cst : f32) outs(%0 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
    %2 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%arg0, %arg1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%1 : tensor<2x1280xf32>) {
    ^bb0(%in: f16, %in_0: f16, %out: f32):
      %3 = arith.extf %in : f16 to f32
      %4 = arith.extf %in_0 : f16 to f32
      %5 = arith.mulf %3, %4 : f32
      %6 = arith.addf %out, %5 : f32
      linalg.yield %6 : f32
    } -> tensor<2x1280xf32>
    util.return %2 : tensor<2x1280xf32>
  }
}
// -----// IR Dump After mlir::iree_compiler::IREE::ABI::ConvertStreamableOpsPass (iree-abi-convert-streamable-ops) //----- //
#map = affine_map<(d0, d1, d2) -> (d0, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
module {
  util.func public @matmul(%arg0: tensor<2x2816xf16>, %arg1: tensor<2816x1280xf16>) -> tensor<2x1280xf32> {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = tensor.empty() : tensor<2x1280xf32>
    %1 = linalg.fill ins(%cst : f32) outs(%0 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
    %2 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%arg0, %arg1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%1 : tensor<2x1280xf32>) {
    ^bb0(%in: f16, %in_0: f16, %out: f32):
      %3 = arith.extf %in : f16 to f32
      %4 = arith.extf %in_0 : f16 to f32
      %5 = arith.mulf %3, %4 : f32
      %6 = arith.addf %out, %5 : f32
      linalg.yield %6 : f32
    } -> tensor<2x1280xf32>
    util.return %2 : tensor<2x1280xf32>
  }
}
// -----// IR Dump After mlir::iree_compiler::IREE::ABI::WrapEntryPointsPass (iree-abi-wrap-entry-points) //----- //
#map = affine_map<(d0, d1, d2) -> (d0, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
module {
  util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
    %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16>
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16>
    %2 = util.call @_matmul(%0, %1) : (tensor<2x2816xf16>, tensor<2816x1280xf16>) -> tensor<2x1280xf32>
    %3 = hal.tensor.export %2 "output0" : tensor<2x1280xf32> -> !hal.buffer_view
    util.return %3 : !hal.buffer_view
  }
  util.func private @_matmul(%arg0: tensor<2x2816xf16>, %arg1: tensor<2816x1280xf16>) -> tensor<2x1280xf32> {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = tensor.empty() : tensor<2x1280xf32>
    %1 = linalg.fill ins(%cst : f32) outs(%0 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
    %2 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%arg0, %arg1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%1 : tensor<2x1280xf32>) {
    ^bb0(%in: f16, %in_0: f16, %out: f32):
      %3 = arith.extf %in : f16 to f32
      %4 = arith.extf %in_0 : f16 to f32
      %5 = arith.mulf %3, %4 : f32
      %6 = arith.addf %out, %5 : f32
      linalg.yield %6 : f32
    } -> tensor<2x1280xf32>
    util.return %2 : tensor<2x1280xf32>
  }
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func private @_matmul(%arg0: tensor<2x2816xf16>, %arg1: tensor<2816x1280xf16>) -> tensor<2x1280xf32> {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = tensor.empty() : tensor<2x1280xf32>
  %1 = linalg.fill ins(%cst : f32) outs(%0 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
  %2 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%arg0, %arg1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%1 : tensor<2x1280xf32>) {
  ^bb0(%in: f16, %in_0: f16, %out: f32):
    %3 = arith.extf %in : f16 to f32
    %4 = arith.extf %in_0 : f16 to f32
    %5 = arith.mulf %3, %4 : f32
    %6 = arith.addf %out, %5 : f32
    linalg.yield %6 : f32
  } -> tensor<2x1280xf32>
  util.return %2 : tensor<2x1280xf32>
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16>
  %2 = util.call @_matmul(%0, %1) : (tensor<2x2816xf16>, tensor<2816x1280xf16>) -> tensor<2x1280xf32>
  %3 = hal.tensor.export %2 "output0" : tensor<2x1280xf32> -> !hal.buffer_view
  util.return %3 : !hal.buffer_view
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16>
  %2 = tensor.empty() : tensor<2x1280xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
  %4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) {
  ^bb0(%in: f16, %in_0: f16, %out: f32):
    %6 = arith.extf %in : f16 to f32
    %7 = arith.extf %in_0 : f16 to f32
    %8 = arith.mulf %6, %7 : f32
    %9 = arith.addf %out, %8 : f32
    linalg.yield %9 : f32
  } -> tensor<2x1280xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump After Inliner (inline) //----- //
#map = affine_map<(d0, d1, d2) -> (d0, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
module {
  util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16>
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16>
    %2 = tensor.empty() : tensor<2x1280xf32>
    %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
    %4 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) {
    ^bb0(%in: f16, %in_0: f16, %out: f32):
      %6 = arith.extf %in : f16 to f32
      %7 = arith.extf %in_0 : f16 to f32
      %8 = arith.mulf %6, %7 : f32
      %9 = arith.addf %out, %8 : f32
      linalg.yield %9 : f32
    } -> tensor<2x1280xf32>
    %5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view
    util.return %5 : !hal.buffer_view
  }
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16>
  %2 = tensor.empty() : tensor<2x1280xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
  %4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) {
  ^bb0(%in: f16, %in_0: f16, %out: f32):
    %6 = arith.extf %in : f16 to f32
    %7 = arith.extf %in_0 : f16 to f32
    %8 = arith.mulf %6, %7 : f32
    %9 = arith.addf %out, %8 : f32
    linalg.yield %9 : f32
  } -> tensor<2x1280xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16>
  %2 = tensor.empty() : tensor<2x1280xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
  %4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) {
  ^bb0(%in: f16, %in_0: f16, %out: f32):
    %6 = arith.extf %in : f16 to f32
    %7 = arith.extf %in_0 : f16 to f32
    %8 = arith.mulf %6, %7 : f32
    %9 = arith.addf %out, %8 : f32
    linalg.yield %9 : f32
  } -> tensor<2x1280xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump After SymbolDCE (symbol-dce) //----- //
#map = affine_map<(d0, d1, d2) -> (d0, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
module {
  util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16>
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16>
    %2 = tensor.empty() : tensor<2x1280xf32>
    %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
    %4 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) {
    ^bb0(%in: f16, %in_0: f16, %out: f32):
      %6 = arith.extf %in : f16 to f32
      %7 = arith.extf %in_0 : f16 to f32
      %8 = arith.mulf %6, %7 : f32
      %9 = arith.addf %out, %8 : f32
      linalg.yield %9 : f32
    } -> tensor<2x1280xf32>
    %5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view
    util.return %5 : !hal.buffer_view
  }
}
// -----// IR Dump After AssignTargetDevicesPass (iree-hal-assign-target-devices) //----- //
#map = affine_map<(d0, d1, d2) -> (d0, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
module attributes {hal.device.targets = [#hal.device.alias<"hip"> : !hal.device]} {
  util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16>
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16>
    %2 = tensor.empty() : tensor<2x1280xf32>
    %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
    %4 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) {
    ^bb0(%in: f16, %in_0: f16, %out: f32):
      %6 = arith.extf %in : f16 to f32
      %7 = arith.extf %in_0 : f16 to f32
      %8 = arith.mulf %6, %7 : f32
      %9 = arith.addf %out, %8 : f32
      linalg.yield %9 : f32
    } -> tensor<2x1280xf32>
    %5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view
    util.return %5 : !hal.buffer_view
  }
}
// -----// IR Dump After MaterializeTargetDevicesPass (iree-hal-materialize-target-devices) //----- //
#map = affine_map<(d0, d1, d2) -> (d0, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #hal.device.alias<"hip"> : !hal.device
  util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16>
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16>
    %2 = tensor.empty() : tensor<2x1280xf32>
    %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
    %4 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) {
    ^bb0(%in: f16, %in_0: f16, %out: f32):
      %6 = arith.extf %in : f16 to f32
      %7 = arith.extf %in_0 : f16 to f32
      %8 = arith.mulf %6, %7 : f32
      %9 = arith.addf %out, %8 : f32
      linalg.yield %9 : f32
    } -> tensor<2x1280xf32>
    %5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view
    util.return %5 : !hal.buffer_view
  }
}
// -----// IR Dump After ResolveDevicePromisesPass (iree-hal-resolve-device-promises) //----- //
#map = affine_map<(d0, d1, d2) -> (d0, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #hal.device.alias<"hip"> : !hal.device
  util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16>
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16>
    %2 = tensor.empty() : tensor<2x1280xf32>
    %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
    %4 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) {
    ^bb0(%in: f16, %in_0: f16, %out: f32):
      %6 = arith.extf %in : f16 to f32
      %7 = arith.extf %in_0 : f16 to f32
      %8 = arith.mulf %6, %7 : f32
      %9 = arith.addf %out, %8 : f32
      linalg.yield %9 : f32
    } -> tensor<2x1280xf32>
    %5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view
    util.return %5 : !hal.buffer_view
  }
}
// -----// IR Dump After ResolveDeviceAliasesPass (iree-hal-resolve-device-aliases) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d0, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_hip
  util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16>
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16>
    %2 = tensor.empty() : tensor<2x1280xf32>
    %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
    %4 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) {
    ^bb0(%in: f16, %in_0: f16, %out: f32):
      %6 = arith.extf %in : f16 to f32
      %7 = arith.extf %in_0 : f16 to f32
      %8 = arith.mulf %6, %7 : f32
      %9 = arith.addf %out, %8 : f32
      linalg.yield %9 : f32
    } -> tensor<2x1280xf32>
    %5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view
    util.return %5 : !hal.buffer_view
  }
}
// -----// IR Dump After VerifyDevicesPass (iree-hal-verify-devices) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d0, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_hip
  util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16>
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16>
    %2 = tensor.empty() : tensor<2x1280xf32>
    %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
    %4 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) {
    ^bb0(%in: f16, %in_0: f16, %out: f32):
      %6 = arith.extf %in : f16 to f32
      %7 = arith.extf %in_0 : f16 to f32
      %8 = arith.mulf %6, %7 : f32
      %9 = arith.addf %out, %8 : f32
      linalg.yield %9 : f32
    } -> tensor<2x1280xf32>
    %5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view
    util.return %5 : !hal.buffer_view
  }
}
// -----// IR Dump After AttrBasedPipelinePass (iree-preprocessing-attr-based-pipeline) //----- //
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16>
  %2 = tensor.empty() : tensor<2x1280xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
  %4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) {
  ^bb0(%in: f16, %in_0: f16, %out: f32):
    %6 = arith.extf %in : f16 to f32
    %7 = arith.extf %in_0 : f16 to f32
    %8 = arith.mulf %6, %7 : f32
    %9 = arith.addf %out, %8 : f32
    linalg.yield %9 : f32
  } -> tensor<2x1280xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump After WarnOnUninitializedValuesPass (iree-global-opt-warn-on-uninitialized-values) //----- //
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16>
  %2 = tensor.empty() : tensor<2x1280xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
  %4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) {
  ^bb0(%in: f16, %in_0: f16, %out: f32):
    %6 = arith.extf %in : f16 to f32
    %7 = arith.extf %in_0 : f16 to f32
    %8 = arith.mulf %6, %7 : f32
    %9 = arith.addf %out, %8 : f32
    linalg.yield %9 : f32
  } -> tensor<2x1280xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump After StripDebugOpsPass (iree-util-strip-debug-ops) //----- //
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16>
  %2 = tensor.empty() : tensor<2x1280xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
  %4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) {
  ^bb0(%in: f16, %in_0: f16, %out: f32):
    %6 = arith.extf %in : f16 to f32
    %7 = arith.extf %in_0 : f16 to f32
    %8 = arith.mulf %6, %7 : f32
    %9 = arith.addf %out, %8 : f32
    linalg.yield %9 : f32
  } -> tensor<2x1280xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump After OptimizeIntArithmeticPass (iree-util-optimize-int-arithmetic) //----- //
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16>
  %2 = tensor.empty() : tensor<2x1280xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
  %4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) {
  ^bb0(%in: f16, %in_0: f16, %out: f32):
    %6 = arith.extf %in : f16 to f32
    %7 = arith.extf %in_0 : f16 to f32
    %8 = arith.mulf %6, %7 : f32
    %9 = arith.addf %out, %8 : f32
    linalg.yield %9 : f32
  } -> tensor<2x1280xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump After LinalgQuantizedConvToConvPass (iree-global-opt-quantized-conv-to-conv) //----- //
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16>
  %2 = tensor.empty() : tensor<2x1280xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
  %4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) {
  ^bb0(%in: f16, %in_0: f16, %out: f32):
    %6 = arith.extf %in : f16 to f32
    %7 = arith.extf %in_0 : f16 to f32
    %8 = arith.mulf %6, %7 : f32
    %9 = arith.addf %out, %8 : f32
    linalg.yield %9 : f32
  } -> tensor<2x1280xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump After LinalgQuantizedMatmulToMatmulPass (iree-global-opt-quantized-matmul-to-matmul) //----- //
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16>
  %2 = tensor.empty() : tensor<2x1280xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
  %4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) {
  ^bb0(%in: f16, %in_0: f16, %out: f32):
    %6 = arith.extf %in : f16 to f32
    %7 = arith.extf %in_0 : f16 to f32
    %8 = arith.mulf %6, %7 : f32
    %9 = arith.addf %out, %8 : f32
    linalg.yield %9 : f32
  } -> tensor<2x1280xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16>
  %2 = tensor.empty() : tensor<2x1280xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
  %4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) {
  ^bb0(%in: f16, %in_0: f16, %out: f32):
    %6 = arith.extf %in : f16 to f32
    %7 = arith.extf %in_0 : f16 to f32
    %8 = arith.mulf %6, %7 : f32
    %9 = arith.addf %out, %8 : f32
    linalg.yield %9 : f32
  } -> tensor<2x1280xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump After RemoveZeroExtentTensorsPass (iree-global-opt-remove-zero-extent-tensors) //----- //
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16>
  %2 = tensor.empty() : tensor<2x1280xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
  %4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) {
  ^bb0(%in: f16, %in_0: f16, %out: f32):
    %6 = arith.extf %in : f16 to f32
    %7 = arith.extf %in_0 : f16 to f32
    %8 = arith.mulf %6, %7 : f32
    %9 = arith.addf %out, %8 : f32
    linalg.yield %9 : f32
  } -> tensor<2x1280xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump After DetachElementwiseFromNamedOpsPass (iree-global-opt-detach-elementwise-from-named-ops) //----- //
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16>
  %2 = tensor.empty() : tensor<2x1280xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
  %4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) {
  ^bb0(%in: f16, %in_0: f16, %out: f32):
    %6 = arith.extf %in : f16 to f32
    %7 = arith.extf %in_0 : f16 to f32
    %8 = arith.mulf %6, %7 : f32
    %9 = arith.addf %out, %8 : f32
    linalg.yield %9 : f32
  } -> tensor<2x1280xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump After LinalgNamedOpConversionPass (linalg-named-op-conversion) //----- //
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16>
  %2 = tensor.empty() : tensor<2x1280xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
  %4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) {
  ^bb0(%in: f16, %in_0: f16, %out: f32):
    %6 = arith.extf %in : f16 to f32
    %7 = arith.extf %in_0 : f16 to f32
    %8 = arith.mulf %6, %7 : f32
    %9 = arith.addf %out, %8 : f32
    linalg.yield %9 : f32
  } -> tensor<2x1280xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump After EraseUnusedLinalgOperandsPass (iree-global-opt-erase-unused-linalg-operands) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d0, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_hip
  util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16>
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16>
    %2 = tensor.empty() : tensor<2x1280xf32>
    %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
    %4 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) {
    ^bb0(%in: f16, %in_0: f16, %out: f32):
      %6 = arith.extf %in : f16 to f32
      %7 = arith.extf %in_0 : f16 to f32
      %8 = arith.mulf %6, %7 : f32
      %9 = arith.addf %out, %8 : f32
      linalg.yield %9 : f32
    } -> tensor<2x1280xf32>
    %5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view
    util.return %5 : !hal.buffer_view
  }
}
// -----// IR Dump After ExpandTensorShapesPass (iree-global-opt-expand-tensor-shapes) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d0, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_hip
  util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16>
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16>
    %2 = tensor.empty() : tensor<2x1280xf32>
    %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
    %4 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) {
    ^bb0(%in: f16, %in_0: f16, %out: f32):
      %6 = arith.extf %in : f16 to f32
      %7 = arith.extf %in_0 : f16 to f32
      %8 = arith.mulf %6, %7 : f32
      %9 = arith.addf %out, %8 : f32
      linalg.yield %9 : f32
    } -> tensor<2x1280xf32>
    %5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view
    util.return %5 : !hal.buffer_view
  }
}
// -----// IR Dump After ConvertElementwiseToLinalgPass (convert-elementwise-to-linalg) //----- //
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16>
  %2 = tensor.empty() : tensor<2x1280xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
  %4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) {
  ^bb0(%in: f16, %in_0: f16, %out: f32):
    %6 = arith.extf %in : f16 to f32
    %7 = arith.extf %in_0 : f16 to f32
    %8 = arith.mulf %6, %7 : f32
    %9 = arith.addf %out, %8 : f32
    linalg.yield %9 : f32
  } -> tensor<2x1280xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump After RaiseSpecialOpsPass (iree-global-opt-raise-special-ops) //----- //
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16>
  %2 = tensor.empty() : tensor<2x1280xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
  %4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) {
  ^bb0(%in: f16, %in_0: f16, %out: f32):
    %6 = arith.extf %in : f16 to f32
    %7 = arith.extf %in_0 : f16 to f32
    %8 = arith.mulf %6, %7 : f32
    %9 = arith.addf %out, %8 : f32
    linalg.yield %9 : f32
  } -> tensor<2x1280xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump After DecomposeConcatPass (iree-global-opt-decompose-concat) //----- //
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16>
  %2 = tensor.empty() : tensor<2x1280xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
  %4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) {
  ^bb0(%in: f16, %in_0: f16, %out: f32):
    %6 = arith.extf %in : f16 to f32
    %7 = arith.extf %in_0 : f16 to f32
    %8 = arith.mulf %6, %7 : f32
    %9 = arith.addf %out, %8 : f32
    linalg.yield %9 : f32
  } -> tensor<2x1280xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump After GeneralizeLinalgNamedOpsPass (iree-global-opt-generalize-linalg-named-ops) //----- //
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16>
  %2 = tensor.empty() : tensor<2x1280xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
  %4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) {
  ^bb0(%in: f16, %in_0: f16, %out: f32):
    %6 = arith.extf %in : f16 to f32
    %7 = arith.extf %in_0 : f16 to f32
    %8 = arith.mulf %6, %7 : f32
    %9 = arith.addf %out, %8 : f32
    linalg.yield %9 : f32
  } -> tensor<2x1280xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump After FoldUnitExtentDimsPass (iree-dispatch-creation-fold-unit-extent-dims) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d0, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_hip
  util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16>
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16>
    %2 = tensor.empty() : tensor<2x1280xf32>
    %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
    %4 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) {
    ^bb0(%in: f16, %in_0: f16, %out: f32):
      %6 = arith.extf %in : f16 to f32
      %7 = arith.extf %in_0 : f16 to f32
      %8 = arith.mulf %6, %7 : f32
      %9 = arith.addf %out, %8 : f32
      linalg.yield %9 : f32
    } -> tensor<2x1280xf32>
    %5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view
    util.return %5 : !hal.buffer_view
  }
}
// -----// IR Dump After ConvertStridedContractionToContractionPass (iree-global-opt-convert-strided-contraction-to-contraction) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d0, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_hip
  util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16>
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16>
    %2 = tensor.empty() : tensor<2x1280xf32>
    %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
    %4 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) {
    ^bb0(%in: f16, %in_0: f16, %out: f32):
      %6 = arith.extf %in : f16 to f32
      %7 = arith.extf %in_0 : f16 to f32
      %8 = arith.mulf %6, %7 : f32
      %9 = arith.addf %out, %8 : f32
      linalg.yield %9 : f32
    } -> tensor<2x1280xf32>
    %5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view
    util.return %5 : !hal.buffer_view
  }
}
// -----// IR Dump After DemoteContractionInputsToBF16Pass (iree-global-opt-demote-contraction-inputs-to-bf16) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = tensor.empty() : tensor<2x1280xf32> | |
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%6 = arith.extf %in : f16 to f32 | |
%7 = arith.extf %in_0 : f16 to f32 | |
%8 = arith.mulf %6, %7 : f32 | |
%9 = arith.addf %out, %8 : f32 | |
linalg.yield %9 : f32 | |
} -> tensor<2x1280xf32> | |
%5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
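// NOTE: DemoteContractionInputsToBF16Pass left the function unchanged: the
// contraction inputs are already f16, so bf16 demotion evidently does not apply
// here (or is not enabled for this configuration).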
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = tensor.empty() : tensor<2x1280xf32> | |
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%6 = arith.extf %in : f16 to f32 | |
%7 = arith.extf %in_0 : f16 to f32 | |
%8 = arith.mulf %6, %7 : f32 | |
%9 = arith.addf %out, %8 : f32 | |
linalg.yield %9 : f32 | |
} -> tensor<2x1280xf32> | |
%5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = tensor.empty() : tensor<2x1280xf32> | |
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%6 = arith.extf %in : f16 to f32 | |
%7 = arith.extf %in_0 : f16 to f32 | |
%8 = arith.mulf %6, %7 : f32 | |
%9 = arith.addf %out, %8 : f32 | |
linalg.yield %9 : f32 | |
} -> tensor<2x1280xf32> | |
%5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
// -----// IR Dump After PropagateLinalgTransposePass (iree-global-opt-propagate-linalg-transpose) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = tensor.empty() : tensor<2x1280xf32> | |
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%6 = arith.extf %in : f16 to f32 | |
%7 = arith.extf %in_0 : f16 to f32 | |
%8 = arith.mulf %6, %7 : f32 | |
%9 = arith.addf %out, %8 : f32 | |
linalg.yield %9 : f32 | |
} -> tensor<2x1280xf32> | |
%5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = tensor.empty() : tensor<2x1280xf32> | |
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%6 = arith.extf %in : f16 to f32 | |
%7 = arith.extf %in_0 : f16 to f32 | |
%8 = arith.mulf %6, %7 : f32 | |
%9 = arith.addf %out, %8 : f32 | |
linalg.yield %9 : f32 | |
} -> tensor<2x1280xf32> | |
%5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = tensor.empty() : tensor<2x1280xf32> | |
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%6 = arith.extf %in : f16 to f32 | |
%7 = arith.extf %in_0 : f16 to f32 | |
%8 = arith.mulf %6, %7 : f32 | |
%9 = arith.addf %out, %8 : f32 | |
linalg.yield %9 : f32 | |
} -> tensor<2x1280xf32> | |
%5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
// -----// IR Dump After SetEncodingPass (iree-dispatch-creation-set-encoding) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = iree_encoding.set_encoding %0 : tensor<2x2816xf16> -> tensor<2x2816xf16, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f16, f16, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [2, 1280, 2816]>> | |
%3 = iree_encoding.set_encoding %1 : tensor<2816x1280xf16> -> tensor<2816x1280xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f16, f16, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [2, 1280, 2816]>> | |
%4 = tensor.empty() : tensor<2x1280xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f16, f16, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [2, 1280, 2816]>> | |
%5 = linalg.fill ins(%cst : f32) outs(%4 : tensor<2x1280xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f16, f16, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [2, 1280, 2816]>>) -> tensor<2x1280xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f16, f16, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [2, 1280, 2816]>> | |
%6 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%2, %3 : tensor<2x2816xf16, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f16, f16, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [2, 1280, 2816]>>, tensor<2816x1280xf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f16, f16, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [2, 1280, 2816]>>) outs(%5 : tensor<2x1280xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f16, f16, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [2, 1280, 2816]>>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%9 = arith.extf %in : f16 to f32 | |
%10 = arith.extf %in_0 : f16 to f32 | |
%11 = arith.mulf %9, %10 : f32 | |
%12 = arith.addf %out, %11 : f32 | |
linalg.yield %12 : f32 | |
} -> tensor<2x1280xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f16, f16, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [2, 1280, 2816]>> | |
%7 = iree_encoding.unset_encoding %6 : tensor<2x1280xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f16, f16, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iteration_sizes = [2, 1280, 2816]>> -> tensor<2x1280xf32> | |
%8 = hal.tensor.export %7 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %8 : !hal.buffer_view | |
} | |
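// NOTE: SetEncodingPass is the first pass in this stretch that actually changes
// the IR. Each matmul operand is wrapped in iree_encoding.set_encoding, tagging
// it with its operand_index, op_type = matmul, the element types [f16, f16, f32],
// the user indexing maps, and the iteration sizes [2, 1280, 2816]; the result is
// unwrapped with iree_encoding.unset_encoding before being exported.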
// -----// IR Dump After MaterializeHostEncodingPass (iree-codegen-materialize-host-encoding) //----- // | |
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_hip | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = tensor.empty() : tensor<2x1280xf32> | |
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%4 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%6 = arith.extf %in : f16 to f32 | |
%7 = arith.extf %in_0 : f16 to f32 | |
%8 = arith.mulf %6, %7 : f32 | |
%9 = arith.addf %out, %8 : f32 | |
linalg.yield %9 : f32 | |
} -> tensor<2x1280xf32> | |
%5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
} | |
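// NOTE: MaterializeHostEncodingPass resolved the encodings introduced above back
// to plain tensor types; on this gfx942 target the matmul encoding evidently
// materializes to an identity (no-op) layout, so the function returns to its
// unencoded form.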
// -----// IR Dump After MaterializeHomogeneousEncodingsPass (iree-global-opt-materialize-homogeneous-encodings) //----- // | |
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_hip | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = tensor.empty() : tensor<2x1280xf32> | |
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%4 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%6 = arith.extf %in : f16 to f32 | |
%7 = arith.extf %in_0 : f16 to f32 | |
%8 = arith.mulf %6, %7 : f32 | |
%9 = arith.addf %out, %8 : f32 | |
linalg.yield %9 : f32 | |
} -> tensor<2x1280xf32> | |
%5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- // | |
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_hip | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = tensor.empty() : tensor<2x1280xf32> | |
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%4 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%6 = arith.extf %in : f16 to f32 | |
%7 = arith.extf %in_0 : f16 to f32 | |
%8 = arith.mulf %6, %7 : f32 | |
%9 = arith.addf %out, %8 : f32 | |
linalg.yield %9 : f32 | |
} -> tensor<2x1280xf32> | |
%5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_hip | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = tensor.empty() : tensor<2x1280xf32> | |
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%4 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%6 = arith.extf %in : f16 to f32 | |
%7 = arith.extf %in_0 : f16 to f32 | |
%8 = arith.mulf %6, %7 : f32 | |
%9 = arith.addf %out, %8 : f32 | |
linalg.yield %9 : f32 | |
} -> tensor<2x1280xf32> | |
%5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After SimplifyPackUnpackPass (iree-global-opt-simplify-pack-unpack) //----- // | |
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_hip | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = tensor.empty() : tensor<2x1280xf32> | |
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%4 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%6 = arith.extf %in : f16 to f32 | |
%7 = arith.extf %in_0 : f16 to f32 | |
%8 = arith.mulf %6, %7 : f32 | |
%9 = arith.addf %out, %8 : f32 | |
linalg.yield %9 : f32 | |
} -> tensor<2x1280xf32> | |
%5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After DataLayoutPropagationPass (iree-global-opt-data-layout-propagation) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = tensor.empty() : tensor<2x1280xf32> | |
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%6 = arith.extf %in : f16 to f32 | |
%7 = arith.extf %in_0 : f16 to f32 | |
%8 = arith.mulf %6, %7 : f32 | |
%9 = arith.addf %out, %8 : f32 | |
linalg.yield %9 : f32 | |
} -> tensor<2x1280xf32> | |
%5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
// -----// IR Dump After GeneralizeLinalgNamedOpsPass (iree-global-opt-generalize-linalg-named-ops) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = tensor.empty() : tensor<2x1280xf32> | |
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%6 = arith.extf %in : f16 to f32 | |
%7 = arith.extf %in_0 : f16 to f32 | |
%8 = arith.mulf %6, %7 : f32 | |
%9 = arith.addf %out, %8 : f32 | |
linalg.yield %9 : f32 | |
} -> tensor<2x1280xf32> | |
%5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
// -----// IR Dump After GlobalLoopInvariantCodeMotionPass (iree-global-opt-loop-invariant-code-motion) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = tensor.empty() : tensor<2x1280xf32> | |
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%6 = arith.extf %in : f16 to f32 | |
%7 = arith.extf %in_0 : f16 to f32 | |
%8 = arith.mulf %6, %7 : f32 | |
%9 = arith.addf %out, %8 : f32 | |
linalg.yield %9 : f32 | |
} -> tensor<2x1280xf32> | |
%5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = tensor.empty() : tensor<2x1280xf32> | |
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%6 = arith.extf %in : f16 to f32 | |
%7 = arith.extf %in_0 : f16 to f32 | |
%8 = arith.mulf %6, %7 : f32 | |
%9 = arith.addf %out, %8 : f32 | |
linalg.yield %9 : f32 | |
} -> tensor<2x1280xf32> | |
%5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = tensor.empty() : tensor<2x1280xf32> | |
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%6 = arith.extf %in : f16 to f32 | |
%7 = arith.extf %in_0 : f16 to f32 | |
%8 = arith.mulf %6, %7 : f32 | |
%9 = arith.addf %out, %8 : f32 | |
linalg.yield %9 : f32 | |
} -> tensor<2x1280xf32> | |
%5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
// -----// IR Dump After SimplifyGlobalAccessesPass (iree-util-simplify-global-accesses) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = tensor.empty() : tensor<2x1280xf32> | |
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%6 = arith.extf %in : f16 to f32 | |
%7 = arith.extf %in_0 : f16 to f32 | |
%8 = arith.mulf %6, %7 : f32 | |
%9 = arith.addf %out, %8 : f32 | |
linalg.yield %9 : f32 | |
} -> tensor<2x1280xf32> | |
%5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
// -----// IR Dump After ApplyPatternsPass (iree-util-apply-patterns) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = tensor.empty() : tensor<2x1280xf32> | |
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%6 = arith.extf %in : f16 to f32 | |
%7 = arith.extf %in_0 : f16 to f32 | |
%8 = arith.mulf %6, %7 : f32 | |
%9 = arith.addf %out, %8 : f32 | |
linalg.yield %9 : f32 | |
} -> tensor<2x1280xf32> | |
%5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
// -----// IR Dump After FoldGlobalsPass (iree-util-fold-globals) //----- // | |
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_hip | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = tensor.empty() : tensor<2x1280xf32> | |
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%4 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%6 = arith.extf %in : f16 to f32 | |
%7 = arith.extf %in_0 : f16 to f32 | |
%8 = arith.mulf %6, %7 : f32 | |
%9 = arith.addf %out, %8 : f32 | |
linalg.yield %9 : f32 | |
} -> tensor<2x1280xf32> | |
%5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After IPOPass (iree-util-ipo) //----- // | |
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_hip | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = tensor.empty() : tensor<2x1280xf32> | |
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%4 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%6 = arith.extf %in : f16 to f32 | |
%7 = arith.extf %in_0 : f16 to f32 | |
%8 = arith.mulf %6, %7 : f32 | |
%9 = arith.addf %out, %8 : f32 | |
linalg.yield %9 : f32 | |
} -> tensor<2x1280xf32> | |
%5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After OptimizeIntArithmeticPass (iree-util-optimize-int-arithmetic) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = tensor.empty() : tensor<2x1280xf32> | |
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%6 = arith.extf %in : f16 to f32 | |
%7 = arith.extf %in_0 : f16 to f32 | |
%8 = arith.mulf %6, %7 : f32 | |
%9 = arith.addf %out, %8 : f32 | |
linalg.yield %9 : f32 | |
} -> tensor<2x1280xf32> | |
%5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = tensor.empty() : tensor<2x1280xf32> | |
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%6 = arith.extf %in : f16 to f32 | |
%7 = arith.extf %in_0 : f16 to f32 | |
%8 = arith.mulf %6, %7 : f32 | |
%9 = arith.addf %out, %8 : f32 | |
linalg.yield %9 : f32 | |
} -> tensor<2x1280xf32> | |
%5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = tensor.empty() : tensor<2x1280xf32> | |
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%6 = arith.extf %in : f16 to f32 | |
%7 = arith.extf %in_0 : f16 to f32 | |
%8 = arith.mulf %6, %7 : f32 | |
%9 = arith.addf %out, %8 : f32 | |
linalg.yield %9 : f32 | |
} -> tensor<2x1280xf32> | |
%5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
// -----// IR Dump After HoistIntoGlobalsPass (iree-util-hoist-into-globals) //----- // | |
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_hip | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = tensor.empty() : tensor<2x1280xf32> | |
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%4 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%6 = arith.extf %in : f16 to f32 | |
%7 = arith.extf %in_0 : f16 to f32 | |
%8 = arith.mulf %6, %7 : f32 | |
%9 = arith.addf %out, %8 : f32 | |
linalg.yield %9 : f32 | |
} -> tensor<2x1280xf32> | |
%5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After JitGlobalsPass (iree-consteval-jit-globals) //----- // | |
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_hip | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = tensor.empty() : tensor<2x1280xf32> | |
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%4 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%6 = arith.extf %in : f16 to f32 | |
%7 = arith.extf %in_0 : f16 to f32 | |
%8 = arith.mulf %6, %7 : f32 | |
%9 = arith.addf %out, %8 : f32 | |
linalg.yield %9 : f32 | |
} -> tensor<2x1280xf32> | |
%5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = tensor.empty() : tensor<2x1280xf32> | |
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%6 = arith.extf %in : f16 to f32 | |
%7 = arith.extf %in_0 : f16 to f32 | |
%8 = arith.mulf %6, %7 : f32 | |
%9 = arith.addf %out, %8 : f32 | |
linalg.yield %9 : f32 | |
} -> tensor<2x1280xf32> | |
%5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- //
// (IR unchanged)
// -----// IR Dump After RaiseSpecialOpsPass (iree-global-opt-raise-special-ops) //----- //
// (IR unchanged)
// -----// IR Dump After InjectTensorTracingPass (iree-flow-inject-tensor-tracing) //----- //
// (IR unchanged)
// -----// IR Dump After TensorPadToTensorInsertSlicePass (iree-dispatch-creation-tensor-pad-to-tensor-insert-slice) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d0, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_hip
  util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16>
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16>
    %2 = tensor.empty() : tensor<2x1280xf32>
    %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
    %4 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) {
    ^bb0(%in: f16, %in_0: f16, %out: f32):
      %6 = arith.extf %in : f16 to f32
      %7 = arith.extf %in_0 : f16 to f32
      %8 = arith.mulf %6, %7 : f32
      %9 = arith.addf %out, %8 : f32
      linalg.yield %9 : f32
    } -> tensor<2x1280xf32>
    %5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view
    util.return %5 : !hal.buffer_view
  }
}
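// Note: this is the first module-scope dump, so the compilation target is now
// visible: a HIP device (#device_target_hip) with a single rocm-hsaco-fb
// executable target for gfx942 (MI300-class), including the available MFMA
// intrinsics, subgroup size 64, and the 64 KiB of workgroup memory that GPU
// codegen may use later. A trace like this is typically produced with MLIR's
// print-ir-after-all instrumentation; one plausible invocation (flag names
// vary across IREE versions, so treat this as a sketch):
//
//   iree-compile matmul.mlir -o matmul.vmfb \
//     --iree-hal-target-backends=rocm --iree-hip-target=gfx942 \
//     --mlir-print-ir-after-all 2> dump.mlir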
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
// (IR unchanged)
// -----// IR Dump After CSE (cse) //----- //
// (IR unchanged)
// -----// IR Dump After SimplifyGlobalAccessesPass (iree-util-simplify-global-accesses) //----- //
// (IR unchanged)
// -----// IR Dump After ApplyPatternsPass (iree-util-apply-patterns) //----- //
// (IR unchanged)
// -----// IR Dump After FoldGlobalsPass (iree-util-fold-globals) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d0, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_hip
  util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16>
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16>
    %2 = tensor.empty() : tensor<2x1280xf32>
    %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
    %4 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) {
    ^bb0(%in: f16, %in_0: f16, %out: f32):
      %6 = arith.extf %in : f16 to f32
      %7 = arith.extf %in_0 : f16 to f32
      %8 = arith.mulf %6, %7 : f32
      %9 = arith.addf %out, %8 : f32
      linalg.yield %9 : f32
    } -> tensor<2x1280xf32>
    %5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view
    util.return %5 : !hal.buffer_view
  }
}
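// Note: the iree.fixedpoint.iteration = 0 module attribute marks the first
// pass of the global-optimization fixed-point loop (FoldGlobals, FuseGlobals,
// and IPO repeat until nothing changes); the driver strips the attribute once
// the IR converges, as the FixedPointIteratorPass dump below shows.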
// -----// IR Dump After FuseGlobalsPass (iree-util-fuse-globals) //----- //
// (IR unchanged)
// -----// IR Dump After IPOPass (iree-util-ipo) //----- //
// (IR unchanged)
// -----// IR Dump After FixedPointIteratorPass (iree-util-fixed-point-iterator) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d0, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_hip
  util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16>
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16>
    %2 = tensor.empty() : tensor<2x1280xf32>
    %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
    %4 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) {
    ^bb0(%in: f16, %in_0: f16, %out: f32):
      %6 = arith.extf %in : f16 to f32
      %7 = arith.extf %in_0 : f16 to f32
      %8 = arith.mulf %6, %7 : f32
      %9 = arith.addf %out, %8 : f32
      linalg.yield %9 : f32
    } -> tensor<2x1280xf32>
    %5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view
    util.return %5 : !hal.buffer_view
  }
}
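// Note: the fixed-point attribute is gone, so global optimization converged
// after a single iteration and the pipeline enters dispatch creation. For
// this program there is only a fill + matmul generic to place, so the
// preprocessing passes below (elementwise fusion, reshape propagation,
// split-reduction, transpose propagation) find nothing to rewrite; the next
// candidate for a structural change is dispatch-region formation, whose dump
// is truncated at the end of this trace.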
// -----// IR Dump After FusionPreprocessingPass (iree-dispatch-creation-fusion-preprocessing) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = tensor.empty() : tensor<2x1280xf32> | |
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%6 = arith.extf %in : f16 to f32 | |
%7 = arith.extf %in_0 : f16 to f32 | |
%8 = arith.mulf %6, %7 : f32 | |
%9 = arith.addf %out, %8 : f32 | |
linalg.yield %9 : f32 | |
} -> tensor<2x1280xf32> | |
%5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = tensor.empty() : tensor<2x1280xf32> | |
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%6 = arith.extf %in : f16 to f32 | |
%7 = arith.extf %in_0 : f16 to f32 | |
%8 = arith.mulf %6, %7 : f32 | |
%9 = arith.addf %out, %8 : f32 | |
linalg.yield %9 : f32 | |
} -> tensor<2x1280xf32> | |
%5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = tensor.empty() : tensor<2x1280xf32> | |
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%6 = arith.extf %in : f16 to f32 | |
%7 = arith.extf %in_0 : f16 to f32 | |
%8 = arith.mulf %6, %7 : f32 | |
%9 = arith.addf %out, %8 : f32 | |
linalg.yield %9 : f32 | |
} -> tensor<2x1280xf32> | |
%5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
// -----// IR Dump After ElementwiseOpFusionPass (iree-dispatch-creation-elementwise-op-fusion) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = tensor.empty() : tensor<2x1280xf32> | |
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%6 = arith.extf %in : f16 to f32 | |
%7 = arith.extf %in_0 : f16 to f32 | |
%8 = arith.mulf %6, %7 : f32 | |
%9 = arith.addf %out, %8 : f32 | |
linalg.yield %9 : f32 | |
} -> tensor<2x1280xf32> | |
%5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = tensor.empty() : tensor<2x1280xf32> | |
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%6 = arith.extf %in : f16 to f32 | |
%7 = arith.extf %in_0 : f16 to f32 | |
%8 = arith.mulf %6, %7 : f32 | |
%9 = arith.addf %out, %8 : f32 | |
linalg.yield %9 : f32 | |
} -> tensor<2x1280xf32> | |
%5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = tensor.empty() : tensor<2x1280xf32> | |
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%6 = arith.extf %in : f16 to f32 | |
%7 = arith.extf %in_0 : f16 to f32 | |
%8 = arith.mulf %6, %7 : f32 | |
%9 = arith.addf %out, %8 : f32 | |
linalg.yield %9 : f32 | |
} -> tensor<2x1280xf32> | |
%5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
// -----// IR Dump After BubbleUpExtractSlicesPass (iree-dispatch-creation-bubble-up-extract-slices) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = tensor.empty() : tensor<2x1280xf32> | |
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%6 = arith.extf %in : f16 to f32 | |
%7 = arith.extf %in_0 : f16 to f32 | |
%8 = arith.mulf %6, %7 : f32 | |
%9 = arith.addf %out, %8 : f32 | |
linalg.yield %9 : f32 | |
} -> tensor<2x1280xf32> | |
%5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
// -----// IR Dump After BubbleUpExpandShapesPass (iree-dispatch-creation-bubble-up-expand-shapes) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = tensor.empty() : tensor<2x1280xf32> | |
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%6 = arith.extf %in : f16 to f32 | |
%7 = arith.extf %in_0 : f16 to f32 | |
%8 = arith.mulf %6, %7 : f32 | |
%9 = arith.addf %out, %8 : f32 | |
linalg.yield %9 : f32 | |
} -> tensor<2x1280xf32> | |
%5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
// -----// IR Dump After BubbleUpExtractSlicesPass (iree-dispatch-creation-bubble-up-extract-slices) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = tensor.empty() : tensor<2x1280xf32> | |
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%6 = arith.extf %in : f16 to f32 | |
%7 = arith.extf %in_0 : f16 to f32 | |
%8 = arith.mulf %6, %7 : f32 | |
%9 = arith.addf %out, %8 : f32 | |
linalg.yield %9 : f32 | |
} -> tensor<2x1280xf32> | |
%5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = tensor.empty() : tensor<2x1280xf32> | |
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%6 = arith.extf %in : f16 to f32 | |
%7 = arith.extf %in_0 : f16 to f32 | |
%8 = arith.mulf %6, %7 : f32 | |
%9 = arith.addf %out, %8 : f32 | |
linalg.yield %9 : f32 | |
} -> tensor<2x1280xf32> | |
%5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = tensor.empty() : tensor<2x1280xf32> | |
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%6 = arith.extf %in : f16 to f32 | |
%7 = arith.extf %in_0 : f16 to f32 | |
%8 = arith.mulf %6, %7 : f32 | |
%9 = arith.addf %out, %8 : f32 | |
linalg.yield %9 : f32 | |
} -> tensor<2x1280xf32> | |
%5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
// -----// IR Dump After ElementwiseOpFusionPass (iree-dispatch-creation-elementwise-op-fusion) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = tensor.empty() : tensor<2x1280xf32> | |
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%6 = arith.extf %in : f16 to f32 | |
%7 = arith.extf %in_0 : f16 to f32 | |
%8 = arith.mulf %6, %7 : f32 | |
%9 = arith.addf %out, %8 : f32 | |
linalg.yield %9 : f32 | |
} -> tensor<2x1280xf32> | |
%5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = tensor.empty() : tensor<2x1280xf32> | |
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%6 = arith.extf %in : f16 to f32 | |
%7 = arith.extf %in_0 : f16 to f32 | |
%8 = arith.mulf %6, %7 : f32 | |
%9 = arith.addf %out, %8 : f32 | |
linalg.yield %9 : f32 | |
} -> tensor<2x1280xf32> | |
%5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = tensor.empty() : tensor<2x1280xf32> | |
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%6 = arith.extf %in : f16 to f32 | |
%7 = arith.extf %in_0 : f16 to f32 | |
%8 = arith.mulf %6, %7 : f32 | |
%9 = arith.addf %out, %8 : f32 | |
linalg.yield %9 : f32 | |
} -> tensor<2x1280xf32> | |
%5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
// -----// IR Dump After SinkReshapesPass (iree-dispatch-creation-sink-reshapes) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = tensor.empty() : tensor<2x1280xf32> | |
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%6 = arith.extf %in : f16 to f32 | |
%7 = arith.extf %in_0 : f16 to f32 | |
%8 = arith.mulf %6, %7 : f32 | |
%9 = arith.addf %out, %8 : f32 | |
linalg.yield %9 : f32 | |
} -> tensor<2x1280xf32> | |
%5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = tensor.empty() : tensor<2x1280xf32> | |
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%6 = arith.extf %in : f16 to f32 | |
%7 = arith.extf %in_0 : f16 to f32 | |
%8 = arith.mulf %6, %7 : f32 | |
%9 = arith.addf %out, %8 : f32 | |
linalg.yield %9 : f32 | |
} -> tensor<2x1280xf32> | |
%5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = tensor.empty() : tensor<2x1280xf32> | |
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%6 = arith.extf %in : f16 to f32 | |
%7 = arith.extf %in_0 : f16 to f32 | |
%8 = arith.mulf %6, %7 : f32 | |
%9 = arith.addf %out, %8 : f32 | |
linalg.yield %9 : f32 | |
} -> tensor<2x1280xf32> | |
%5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
// -----// IR Dump After FuseMultiUseElementwiseProducerPass (iree-dispatch-creation-fuse-multi-use-elementwise-producer) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = tensor.empty() : tensor<2x1280xf32> | |
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%6 = arith.extf %in : f16 to f32 | |
%7 = arith.extf %in_0 : f16 to f32 | |
%8 = arith.mulf %6, %7 : f32 | |
%9 = arith.addf %out, %8 : f32 | |
linalg.yield %9 : f32 | |
} -> tensor<2x1280xf32> | |
%5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = tensor.empty() : tensor<2x1280xf32> | |
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%6 = arith.extf %in : f16 to f32 | |
%7 = arith.extf %in_0 : f16 to f32 | |
%8 = arith.mulf %6, %7 : f32 | |
%9 = arith.addf %out, %8 : f32 | |
linalg.yield %9 : f32 | |
} -> tensor<2x1280xf32> | |
%5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = tensor.empty() : tensor<2x1280xf32> | |
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%6 = arith.extf %in : f16 to f32 | |
%7 = arith.extf %in_0 : f16 to f32 | |
%8 = arith.mulf %6, %7 : f32 | |
%9 = arith.addf %out, %8 : f32 | |
linalg.yield %9 : f32 | |
} -> tensor<2x1280xf32> | |
%5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
// -----// IR Dump After SplitReductionPass (iree-dispatch-creation-split-reduction-ops) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = tensor.empty() : tensor<2x1280xf32> | |
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%6 = arith.extf %in : f16 to f32 | |
%7 = arith.extf %in_0 : f16 to f32 | |
%8 = arith.mulf %6, %7 : f32 | |
%9 = arith.addf %out, %8 : f32 | |
linalg.yield %9 : f32 | |
} -> tensor<2x1280xf32> | |
%5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
// -----// IR Dump After TransposeGenericOpsPass (iree-dispatch-creation-transpose-generic-ops) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = tensor.empty() : tensor<2x1280xf32> | |
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%6 = arith.extf %in : f16 to f32 | |
%7 = arith.extf %in_0 : f16 to f32 | |
%8 = arith.mulf %6, %7 : f32 | |
%9 = arith.addf %out, %8 : f32 | |
linalg.yield %9 : f32 | |
} -> tensor<2x1280xf32> | |
%5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
// -----// IR Dump After HoistIntoGlobalsPass (iree-util-hoist-into-globals) //----- // | |
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_hip | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = tensor.empty() : tensor<2x1280xf32> | |
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%4 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%6 = arith.extf %in : f16 to f32 | |
%7 = arith.extf %in_0 : f16 to f32 | |
%8 = arith.mulf %6, %7 : f32 | |
%9 = arith.addf %out, %8 : f32 | |
linalg.yield %9 : f32 | |
} -> tensor<2x1280xf32> | |
%5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16>
  %2 = tensor.empty() : tensor<2x1280xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
  %4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) {
  ^bb0(%in: f16, %in_0: f16, %out: f32):
    %6 = arith.extf %in : f16 to f32
    %7 = arith.extf %in_0 : f16 to f32
    %8 = arith.mulf %6, %7 : f32
    %9 = arith.addf %out, %8 : f32
    linalg.yield %9 : f32
  } -> tensor<2x1280xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16>
  %2 = tensor.empty() : tensor<2x1280xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
  %4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) {
  ^bb0(%in: f16, %in_0: f16, %out: f32):
    %6 = arith.extf %in : f16 to f32
    %7 = arith.extf %in_0 : f16 to f32
    %8 = arith.mulf %6, %7 : f32
    %9 = arith.addf %out, %8 : f32
    linalg.yield %9 : f32
  } -> tensor<2x1280xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump After FormScalarDispatchesPass (iree-dispatch-creation-form-scalar-dispatches) //----- //
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16>
  %2 = tensor.empty() : tensor<2x1280xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
  %4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) {
  ^bb0(%in: f16, %in_0: f16, %out: f32):
    %6 = arith.extf %in : f16 to f32
    %7 = arith.extf %in_0 : f16 to f32
    %8 = arith.mulf %6, %7 : f32
    %9 = arith.addf %out, %8 : f32
    linalg.yield %9 : f32
  } -> tensor<2x1280xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
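// NOTE (annotation, not compiler output): Canonicalizer, CSE, and
// FormScalarDispatchesPass are all no-ops on this input, as the identical dumps
// above show: there is nothing to fold or deduplicate, and the only computation
// is the matmul-like generic, which is not a scalar dispatch candidate.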
// -----// IR Dump After FormDispatchRegionsPass (iree-dispatch-creation-form-dispatch-regions) //----- //
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16>
  %2 = tensor.empty() : tensor<2x1280xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
  %4 = flow.dispatch.region -> (tensor<2x1280xf32>) {
    %6 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) {
    ^bb0(%in: f16, %in_0: f16, %out: f32):
      %7 = arith.extf %in : f16 to f32
      %8 = arith.extf %in_0 : f16 to f32
      %9 = arith.mulf %7, %8 : f32
      %10 = arith.addf %out, %9 : f32
      linalg.yield %10 : f32
    } -> tensor<2x1280xf32>
    flow.return %6 : tensor<2x1280xf32>
  }
  %5 = hal.tensor.export %4 "output0" : tensor<2x1280xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
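// NOTE (annotation, not compiler output): FormDispatchRegionsPass wraps the
// matmul-like generic in a flow.dispatch.region, the unit that eventually becomes
// one GPU kernel launch. Its producers (tensor.empty, the zero constant, and
// linalg.fill) are still outside the region here; the next pass clones them in.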
// -----// IR Dump After CloneProducersIntoDispatchRegionsPass (iree-dispatch-creation-clone-producers-into-dispatch-regions) //----- //
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16>
  %2 = flow.dispatch.region -> (tensor<2x1280xf32>) {
    %4 = tensor.empty() : tensor<2x1280xf32>
    %cst = arith.constant 0.000000e+00 : f32
    %5 = linalg.fill ins(%cst : f32) outs(%4 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
    %6 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%5 : tensor<2x1280xf32>) {
    ^bb0(%in: f16, %in_0: f16, %out: f32):
      %7 = arith.extf %in : f16 to f32
      %8 = arith.extf %in_0 : f16 to f32
      %9 = arith.mulf %7, %8 : f32
      %10 = arith.addf %out, %9 : f32
      linalg.yield %10 : f32
    } -> tensor<2x1280xf32>
    flow.return %6 : tensor<2x1280xf32>
  }
  %3 = hal.tensor.export %2 "output0" : tensor<2x1280xf32> -> !hal.buffer_view
  util.return %3 : !hal.buffer_view
}
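// NOTE (annotation, not compiler output): after cloning, the dispatch region is
// self-contained: the zero constant, tensor.empty, and linalg.fill now live inside
// it, so the future kernel initializes its own accumulator rather than reading a
// pre-filled tensor produced by a separate dispatch.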
// -----// IR Dump After CollapseDimensionsPass (iree-dispatch-creation-collapse-dimensions) //----- //
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16>
  %2 = flow.dispatch.region -> (tensor<2x1280xf32>) {
    %4 = tensor.empty() : tensor<2x1280xf32>
    %cst = arith.constant 0.000000e+00 : f32
    %5 = linalg.fill ins(%cst : f32) outs(%4 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
    %6 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%5 : tensor<2x1280xf32>) {
    ^bb0(%in: f16, %in_0: f16, %out: f32):
      %7 = arith.extf %in : f16 to f32
      %8 = arith.extf %in_0 : f16 to f32
      %9 = arith.mulf %7, %8 : f32
      %10 = arith.addf %out, %9 : f32
      linalg.yield %10 : f32
    } -> tensor<2x1280xf32>
    flow.return %6 : tensor<2x1280xf32>
  }
  %3 = hal.tensor.export %2 "output0" : tensor<2x1280xf32> -> !hal.buffer_view
  util.return %3 : !hal.buffer_view
}
// -----// IR Dump After ConvertEncodingToFlowPass (iree-dispatch-creation-convert-encoding-to-flow) //----- //
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16>
  %2 = flow.dispatch.region -> (tensor<2x1280xf32>) {
    %4 = tensor.empty() : tensor<2x1280xf32>
    %cst = arith.constant 0.000000e+00 : f32
    %5 = linalg.fill ins(%cst : f32) outs(%4 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
    %6 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%5 : tensor<2x1280xf32>) {
    ^bb0(%in: f16, %in_0: f16, %out: f32):
      %7 = arith.extf %in : f16 to f32
      %8 = arith.extf %in_0 : f16 to f32
      %9 = arith.mulf %7, %8 : f32
      %10 = arith.addf %out, %9 : f32
      linalg.yield %10 : f32
    } -> tensor<2x1280xf32>
    flow.return %6 : tensor<2x1280xf32>
  }
  %3 = hal.tensor.export %2 "output0" : tensor<2x1280xf32> -> !hal.buffer_view
  util.return %3 : !hal.buffer_view
}
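// NOTE (annotation, not compiler output): CollapseDimensionsPass and
// ConvertEncodingToFlowPass also leave the IR untouched, as the identical dumps
// show: the operands are already rank-2 with contiguous dimensions, and no tensor
// encodings are present to rewrite.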
// -----// IR Dump After HoistIntoGlobalsPass (iree-util-hoist-into-globals) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d0, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_hip
  util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
    %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16>
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16>
    %2 = flow.dispatch.region -> (tensor<2x1280xf32>) {
      %4 = tensor.empty() : tensor<2x1280xf32>
      %cst = arith.constant 0.000000e+00 : f32
      %5 = linalg.fill ins(%cst : f32) outs(%4 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
      %6 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%5 : tensor<2x1280xf32>) {
      ^bb0(%in: f16, %in_0: f16, %out: f32):
        %7 = arith.extf %in : f16 to f32
        %8 = arith.extf %in_0 : f16 to f32
        %9 = arith.mulf %7, %8 : f32
        %10 = arith.addf %out, %9 : f32
        linalg.yield %10 : f32
      } -> tensor<2x1280xf32>
      flow.return %6 : tensor<2x1280xf32>
    }
    %3 = hal.tensor.export %2 "output0" : tensor<2x1280xf32> -> !hal.buffer_view
    util.return %3 : !hal.buffer_view
  }
}
// -----// IR Dump After ConvertDispatchRegionsToWorkgroupsPass (iree-dispatch-creation-convert-dispatch-regions-to-workgroups) //----- //
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16>
  %2 = flow.dispatch.workgroups(%0, %1) : (tensor<2x2816xf16>, tensor<2816x1280xf16>) -> tensor<2x1280xf32> =
      (%arg2: !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>>, %arg3: !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>>, %arg4: !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>) {
    %4 = iree_tensor_ext.dispatch.tensor.load %arg2, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16>
    %5 = iree_tensor_ext.dispatch.tensor.load %arg3, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16>
    %6 = tensor.empty() : tensor<2x1280xf32>
    %cst = arith.constant 0.000000e+00 : f32
    %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
    %8 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%4, %5 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%7 : tensor<2x1280xf32>) {
    ^bb0(%in: f16, %in_0: f16, %out: f32):
      %9 = arith.extf %in : f16 to f32
      %10 = arith.extf %in_0 : f16 to f32
      %11 = arith.mulf %9, %10 : f32
      %12 = arith.addf %out, %11 : f32
      linalg.yield %12 : f32
    } -> tensor<2x1280xf32>
    iree_tensor_ext.dispatch.tensor.store %8, %arg4, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>
    flow.return
  }
  %3 = hal.tensor.export %2 "output0" : tensor<2x1280xf32> -> !hal.buffer_view
  util.return %3 : !hal.buffer_view
}
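// NOTE (annotation, not compiler output): the region is now a
// flow.dispatch.workgroups op with an explicit dispatch ABI: tensor operands become
// !iree_tensor_ext.dispatch.tensor bindings (readonly/writeonly), and the body moves
// data through explicit iree_tensor_ext.dispatch.tensor.load/store ops. No workgroup
// count is attached yet; that is materialized a few passes later.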
// -----// IR Dump After ConvertTensorToFlowPass (iree-dispatch-creation-convert-tensor-to-flow) //----- //
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16>
  %2 = flow.dispatch.workgroups(%0, %1) : (tensor<2x2816xf16>, tensor<2816x1280xf16>) -> tensor<2x1280xf32> =
      (%arg2: !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>>, %arg3: !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>>, %arg4: !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>) {
    %cst = arith.constant 0.000000e+00 : f32
    %4 = iree_tensor_ext.dispatch.tensor.load %arg2, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16>
    %5 = iree_tensor_ext.dispatch.tensor.load %arg3, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16>
    %6 = tensor.empty() : tensor<2x1280xf32>
    %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
    %8 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%4, %5 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%7 : tensor<2x1280xf32>) {
    ^bb0(%in: f16, %in_0: f16, %out: f32):
      %9 = arith.extf %in : f16 to f32
      %10 = arith.extf %in_0 : f16 to f32
      %11 = arith.mulf %9, %10 : f32
      %12 = arith.addf %out, %11 : f32
      linalg.yield %12 : f32
    } -> tensor<2x1280xf32>
    iree_tensor_ext.dispatch.tensor.store %8, %arg4, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>
    flow.return
  }
  %3 = hal.tensor.export %2 "output0" : tensor<2x1280xf32> -> !hal.buffer_view
  util.return %3 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16>
  %2 = flow.dispatch.workgroups(%0, %1) : (tensor<2x2816xf16>, tensor<2816x1280xf16>) -> tensor<2x1280xf32> =
      (%arg2: !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>>, %arg3: !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>>, %arg4: !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>) {
    %cst = arith.constant 0.000000e+00 : f32
    %4 = iree_tensor_ext.dispatch.tensor.load %arg2, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16>
    %5 = iree_tensor_ext.dispatch.tensor.load %arg3, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16>
    %6 = tensor.empty() : tensor<2x1280xf32>
    %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
    %8 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%4, %5 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%7 : tensor<2x1280xf32>) {
    ^bb0(%in: f16, %in_0: f16, %out: f32):
      %9 = arith.extf %in : f16 to f32
      %10 = arith.extf %in_0 : f16 to f32
      %11 = arith.mulf %9, %10 : f32
      %12 = arith.addf %out, %11 : f32
      linalg.yield %12 : f32
    } -> tensor<2x1280xf32>
    iree_tensor_ext.dispatch.tensor.store %8, %arg4, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>
    flow.return
  }
  %3 = hal.tensor.export %2 "output0" : tensor<2x1280xf32> -> !hal.buffer_view
  util.return %3 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16>
  %2 = flow.dispatch.workgroups(%0, %1) : (tensor<2x2816xf16>, tensor<2816x1280xf16>) -> tensor<2x1280xf32> =
      (%arg2: !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>>, %arg3: !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>>, %arg4: !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>) {
    %cst = arith.constant 0.000000e+00 : f32
    %4 = iree_tensor_ext.dispatch.tensor.load %arg2, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16>
    %5 = iree_tensor_ext.dispatch.tensor.load %arg3, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16>
    %6 = tensor.empty() : tensor<2x1280xf32>
    %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
    %8 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%4, %5 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%7 : tensor<2x1280xf32>) {
    ^bb0(%in: f16, %in_0: f16, %out: f32):
      %9 = arith.extf %in : f16 to f32
      %10 = arith.extf %in_0 : f16 to f32
      %11 = arith.mulf %9, %10 : f32
      %12 = arith.addf %out, %11 : f32
      linalg.yield %12 : f32
    } -> tensor<2x1280xf32>
    iree_tensor_ext.dispatch.tensor.store %8, %arg4, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>
    flow.return
  }
  %3 = hal.tensor.export %2 "output0" : tensor<2x1280xf32> -> !hal.buffer_view
  util.return %3 : !hal.buffer_view
}
// -----// IR Dump After MaterializeDefaultWorkgroupCountRegionPass (iree-dispatch-creation-materialize-default-workgroup-count-region) //----- //
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16>
  %2 = flow.dispatch.workgroups(%0, %1) : (tensor<2x2816xf16>, tensor<2816x1280xf16>) -> tensor<2x1280xf32> =
      (%arg2: !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>>, %arg3: !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>>, %arg4: !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>) {
    %cst = arith.constant 0.000000e+00 : f32
    %4 = iree_tensor_ext.dispatch.tensor.load %arg2, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16>
    %5 = iree_tensor_ext.dispatch.tensor.load %arg3, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16>
    %6 = tensor.empty() : tensor<2x1280xf32>
    %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
    %8 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%4, %5 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%7 : tensor<2x1280xf32>) {
    ^bb0(%in: f16, %in_0: f16, %out: f32):
      %9 = arith.extf %in : f16 to f32
      %10 = arith.extf %in_0 : f16 to f32
      %11 = arith.mulf %9, %10 : f32
      %12 = arith.addf %out, %11 : f32
      linalg.yield %12 : f32
    } -> tensor<2x1280xf32>
    iree_tensor_ext.dispatch.tensor.store %8, %arg4, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>
    flow.return
  } count() -> (index, index, index) {
    %x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice
    flow.return %x, %y, %z : index, index, index
  }
  %3 = hal.tensor.export %2 "output0" : tensor<2x1280xf32> -> !hal.buffer_view
  util.return %3 : !hal.buffer_view
}
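// NOTE (annotation, not compiler output): the new count() region computes the
// workgroup grid for the dispatch; iree_tensor_ext.dispatch.workgroup_count_from_slice
// is a placeholder that backend codegen resolves once tile and workgroup sizes for
// this kernel have been chosen.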
// -----// IR Dump After VerifyInputLegalityPass (iree-verify-input-legality) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d0, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_hip
  util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
    %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16>
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16>
    %2 = flow.dispatch.workgroups(%0, %1) : (tensor<2x2816xf16>, tensor<2816x1280xf16>) -> tensor<2x1280xf32> =
        (%arg2: !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>>, %arg3: !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>>, %arg4: !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>) {
      %cst = arith.constant 0.000000e+00 : f32
      %4 = iree_tensor_ext.dispatch.tensor.load %arg2, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16>
      %5 = iree_tensor_ext.dispatch.tensor.load %arg3, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16>
      %6 = tensor.empty() : tensor<2x1280xf32>
      %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
      %8 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%4, %5 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%7 : tensor<2x1280xf32>) {
      ^bb0(%in: f16, %in_0: f16, %out: f32):
        %9 = arith.extf %in : f16 to f32
        %10 = arith.extf %in_0 : f16 to f32
        %11 = arith.mulf %9, %10 : f32
        %12 = arith.addf %out, %11 : f32
        linalg.yield %12 : f32
      } -> tensor<2x1280xf32>
      iree_tensor_ext.dispatch.tensor.store %8, %arg4, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>
      flow.return
    } count() -> (index, index, index) {
      %x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice
      flow.return %x, %y, %z : index, index, index
    }
    %3 = hal.tensor.export %2 "output0" : tensor<2x1280xf32> -> !hal.buffer_view
    util.return %3 : !hal.buffer_view
  }
}
// -----// IR Dump After InitializeEmptyTensorsPass (iree-flow-initialize-empty-tensors) //----- //
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16>
  %2 = flow.dispatch.workgroups(%0, %1) : (tensor<2x2816xf16>, tensor<2816x1280xf16>) -> tensor<2x1280xf32> =
      (%arg2: !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>>, %arg3: !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>>, %arg4: !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>) {
    %cst = arith.constant 0.000000e+00 : f32
    %4 = iree_tensor_ext.dispatch.tensor.load %arg2, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16>
    %5 = iree_tensor_ext.dispatch.tensor.load %arg3, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16>
    %6 = tensor.empty() : tensor<2x1280xf32>
    %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
    %8 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%4, %5 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%7 : tensor<2x1280xf32>) {
    ^bb0(%in: f16, %in_0: f16, %out: f32):
      %9 = arith.extf %in : f16 to f32
      %10 = arith.extf %in_0 : f16 to f32
      %11 = arith.mulf %9, %10 : f32
      %12 = arith.addf %out, %11 : f32
      linalg.yield %12 : f32
    } -> tensor<2x1280xf32>
    iree_tensor_ext.dispatch.tensor.store %8, %arg4, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>
    flow.return
  } count() -> (index, index, index) {
    %x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice
    flow.return %x, %y, %z : index, index, index
  }
  %3 = hal.tensor.export %2 "output0" : tensor<2x1280xf32> -> !hal.buffer_view
  util.return %3 : !hal.buffer_view
}
// -----// IR Dump After CaptureDynamicDimsPass (iree-flow-capture-dynamic-dims) //----- //
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16>
  %2 = flow.dispatch.workgroups(%0, %1) : (tensor<2x2816xf16>, tensor<2816x1280xf16>) -> tensor<2x1280xf32> =
      (%arg2: !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>>, %arg3: !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>>, %arg4: !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>) {
    %cst = arith.constant 0.000000e+00 : f32
    %4 = iree_tensor_ext.dispatch.tensor.load %arg2, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16>
    %5 = iree_tensor_ext.dispatch.tensor.load %arg3, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16>
    %6 = tensor.empty() : tensor<2x1280xf32>
    %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
    %8 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%4, %5 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%7 : tensor<2x1280xf32>) {
    ^bb0(%in: f16, %in_0: f16, %out: f32):
      %9 = arith.extf %in : f16 to f32
      %10 = arith.extf %in_0 : f16 to f32
      %11 = arith.mulf %9, %10 : f32
      %12 = arith.addf %out, %11 : f32
      linalg.yield %12 : f32
    } -> tensor<2x1280xf32>
    iree_tensor_ext.dispatch.tensor.store %8, %arg4, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>
    flow.return
  } count() -> (index, index, index) {
    %x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice
    flow.return %x, %y, %z : index, index, index
  }
  %3 = hal.tensor.export %2 "output0" : tensor<2x1280xf32> -> !hal.buffer_view
  util.return %3 : !hal.buffer_view
}
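// NOTE (annotation, not compiler output): InitializeEmptyTensorsPass and
// CaptureDynamicDimsPass change nothing here since every shape is static: there are
// no dynamic dimensions to capture, and the tensor.empty already feeds a fill inside
// the dispatch rather than escaping it uninitialized.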
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16>
  %2 = flow.dispatch.workgroups(%0, %1) : (tensor<2x2816xf16>, tensor<2816x1280xf16>) -> tensor<2x1280xf32> =
      (%arg2: !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>>, %arg3: !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>>, %arg4: !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>) {
    %cst = arith.constant 0.000000e+00 : f32
    %4 = iree_tensor_ext.dispatch.tensor.load %arg2, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16>
    %5 = iree_tensor_ext.dispatch.tensor.load %arg3, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16>
    %6 = tensor.empty() : tensor<2x1280xf32>
    %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
    %8 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%4, %5 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%7 : tensor<2x1280xf32>) {
    ^bb0(%in: f16, %in_0: f16, %out: f32):
      %9 = arith.extf %in : f16 to f32
      %10 = arith.extf %in_0 : f16 to f32
      %11 = arith.mulf %9, %10 : f32
      %12 = arith.addf %out, %11 : f32
      linalg.yield %12 : f32
    } -> tensor<2x1280xf32>
    iree_tensor_ext.dispatch.tensor.store %8, %arg4, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>
    flow.return
  } count() -> (index, index, index) {
    %x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice
    flow.return %x, %y, %z : index, index, index
  }
  %3 = hal.tensor.export %2 "output0" : tensor<2x1280xf32> -> !hal.buffer_view
  util.return %3 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16>
  %2 = flow.dispatch.workgroups(%0, %1) : (tensor<2x2816xf16>, tensor<2816x1280xf16>) -> tensor<2x1280xf32> =
      (%arg2: !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>>, %arg3: !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>>, %arg4: !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>) {
    %cst = arith.constant 0.000000e+00 : f32
    %4 = iree_tensor_ext.dispatch.tensor.load %arg2, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16>
    %5 = iree_tensor_ext.dispatch.tensor.load %arg3, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16>
    %6 = tensor.empty() : tensor<2x1280xf32>
    %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
    %8 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%4, %5 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%7 : tensor<2x1280xf32>) {
    ^bb0(%in: f16, %in_0: f16, %out: f32):
      %9 = arith.extf %in : f16 to f32
      %10 = arith.extf %in_0 : f16 to f32
      %11 = arith.mulf %9, %10 : f32
      %12 = arith.addf %out, %11 : f32
      linalg.yield %12 : f32
    } -> tensor<2x1280xf32>
    iree_tensor_ext.dispatch.tensor.store %8, %arg4, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>
    flow.return
  } count() -> (index, index, index) {
    %x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice
    flow.return %x, %y, %z : index, index, index
  }
  %3 = hal.tensor.export %2 "output0" : tensor<2x1280xf32> -> !hal.buffer_view
  util.return %3 : !hal.buffer_view
}
// -----// IR Dump After OutlineDispatchExternsPass (iree-flow-outline-dispatch-externs) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d0, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_hip
  util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
    %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16>
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16>
    %2 = flow.dispatch.workgroups(%0, %1) : (tensor<2x2816xf16>, tensor<2816x1280xf16>) -> tensor<2x1280xf32> =
        (%arg2: !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>>, %arg3: !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>>, %arg4: !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>) {
      %cst = arith.constant 0.000000e+00 : f32
      %4 = iree_tensor_ext.dispatch.tensor.load %arg2, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16>
      %5 = iree_tensor_ext.dispatch.tensor.load %arg3, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16>
      %6 = tensor.empty() : tensor<2x1280xf32>
      %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
      %8 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%4, %5 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%7 : tensor<2x1280xf32>) {
      ^bb0(%in: f16, %in_0: f16, %out: f32):
        %9 = arith.extf %in : f16 to f32
        %10 = arith.extf %in_0 : f16 to f32
        %11 = arith.mulf %9, %10 : f32
        %12 = arith.addf %out, %11 : f32
        linalg.yield %12 : f32
      } -> tensor<2x1280xf32>
      iree_tensor_ext.dispatch.tensor.store %8, %arg4, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>
      flow.return
    } count() -> (index, index, index) {
      %x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice
      flow.return %x, %y, %z : index, index, index
    }
    %3 = hal.tensor.export %2 "output0" : tensor<2x1280xf32> -> !hal.buffer_view
    util.return %3 : !hal.buffer_view
  }
}
// -----// IR Dump After OutlineDispatchRegionsPass (iree-flow-outline-dispatch-regions) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d0, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_hip
  flow.executable private @matmul_dispatch_0 {
    flow.executable.export public @matmul_dispatch_0 workgroups() -> (index, index, index) {
      %x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice
      flow.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @matmul_dispatch_0(%arg0: !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>>, %arg1: !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>>, %arg2: !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>) {
        %cst = arith.constant 0.000000e+00 : f32
        %0 = iree_tensor_ext.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16>
        %1 = iree_tensor_ext.dispatch.tensor.load %arg1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16>
        %2 = tensor.empty() : tensor<2x1280xf32>
        %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
        %4 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) {
        ^bb0(%in: f16, %in_0: f16, %out: f32):
          %5 = arith.extf %in : f16 to f32
          %6 = arith.extf %in_0 : f16 to f32
          %7 = arith.mulf %5, %6 : f32
          %8 = arith.addf %out, %7 : f32
          linalg.yield %8 : f32
        } -> tensor<2x1280xf32>
        iree_tensor_ext.dispatch.tensor.store %4, %arg2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>
        return
      }
    }
  }
  util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
    %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16>
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16>
    %2 = flow.dispatch @matmul_dispatch_0::@matmul_dispatch_0(%0, %1) : (tensor<2x2816xf16>, tensor<2816x1280xf16>) -> tensor<2x1280xf32>
    %3 = hal.tensor.export %2 "output0" : tensor<2x1280xf32> -> !hal.buffer_view
    util.return %3 : !hal.buffer_view
  }
}
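// NOTE (annotation, not compiler output): the workgroups body is outlined into a
// standalone flow.executable (@matmul_dispatch_0) whose public export carries the
// workgroup-count region; the caller shrinks to a single flow.dispatch of that
// entry point, which is the form later stages compile to a ROCm kernel.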
// -----// IR Dump After AnnotateDispatchesPass (iree-flow-annotate-dispatches) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d0, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_hip
  flow.executable private @matmul_dispatch_0 {
    flow.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 workgroups() -> (index, index, index) {
      %x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice
      flow.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>>, %arg1: !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>>, %arg2: !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>) {
        %cst = arith.constant 0.000000e+00 : f32
        %0 = iree_tensor_ext.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16>
        %1 = iree_tensor_ext.dispatch.tensor.load %arg1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16>
        %2 = tensor.empty() : tensor<2x1280xf32>
        %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
        %4 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) {
        ^bb0(%in: f16, %in_0: f16, %out: f32):
          %5 = arith.extf %in : f16 to f32
          %6 = arith.extf %in_0 : f16 to f32
          %7 = arith.mulf %5, %6 : f32
          %8 = arith.addf %out, %7 : f32
          linalg.yield %8 : f32
        } -> tensor<2x1280xf32>
        iree_tensor_ext.dispatch.tensor.store %4, %arg2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>
        return
      }
    }
  }
  util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
    %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16>
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16>
    %2 = flow.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%0, %1) : (tensor<2x2816xf16>, tensor<2816x1280xf16>) -> tensor<2x1280xf32>
    %3 = hal.tensor.export %2 "output0" : tensor<2x1280xf32> -> !hal.buffer_view
    util.return %3 : !hal.buffer_view
  }
}
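// NOTE (annotation, not compiler output): AnnotateDispatchesPass renames the export
// to @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32, encoding the recognized
// op kind ("matmul_like": a plain matmul whose f16 inputs are promoted to an f32
// accumulator) and the M x N x K = 2 x 1280 x 2816 problem shape.
// For reference (editorial sketch, not taken from the dump), the same computation
// written as a named op would be:
//   %4 = linalg.matmul ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>)
//                      outs(%3 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
// where the named op's implicit cast semantics supply the arith.extf promotion.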
// -----// IR Dump After StripDebugOpsPass (iree-util-strip-debug-ops) //----- //
flow.executable private @matmul_dispatch_0 {
  flow.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 workgroups() -> (index, index, index) {
    %x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice
    flow.return %x, %y, %z : index, index, index
  }
  builtin.module {
    func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>>, %arg1: !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>>, %arg2: !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>) {
      %cst = arith.constant 0.000000e+00 : f32
      %0 = iree_tensor_ext.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16>
      %1 = iree_tensor_ext.dispatch.tensor.load %arg1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16>
      %2 = tensor.empty() : tensor<2x1280xf32>
      %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
      %4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) {
      ^bb0(%in: f16, %in_0: f16, %out: f32):
        %5 = arith.extf %in : f16 to f32
        %6 = arith.extf %in_0 : f16 to f32
        %7 = arith.mulf %5, %6 : f32
        %8 = arith.addf %out, %7 : f32
        linalg.yield %8 : f32
      } -> tensor<2x1280xf32>
      iree_tensor_ext.dispatch.tensor.store %4, %arg2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>
      return
    }
  }
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16>
  %2 = flow.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%0, %1) : (tensor<2x2816xf16>, tensor<2816x1280xf16>) -> tensor<2x1280xf32>
  %3 = hal.tensor.export %2 "output0" : tensor<2x1280xf32> -> !hal.buffer_view
  util.return %3 : !hal.buffer_view
}
// -----// IR Dump After DeduplicateExecutablesPass (iree-flow-deduplicate-executables) //----- // | |
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_hip | |
flow.executable private @matmul_dispatch_0 { | |
flow.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>>, %arg1: !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>>, %arg2: !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = iree_tensor_ext.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%1 = iree_tensor_ext.dispatch.tensor.load %arg1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%2 = tensor.empty() : tensor<2x1280xf32> | |
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%4 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%5 = arith.extf %in : f16 to f32 | |
%6 = arith.extf %in_0 : f16 to f32 | |
%7 = arith.mulf %5, %6 : f32 | |
%8 = arith.addf %out, %7 : f32 | |
linalg.yield %8 : f32 | |
} -> tensor<2x1280xf32> | |
iree_tensor_ext.dispatch.tensor.store %4, %arg2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
} | |
} | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = flow.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%0, %1) : (tensor<2x2816xf16>, tensor<2816x1280xf16>) -> tensor<2x1280xf32> | |
%3 = hal.tensor.export %2 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
} | |
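The flow-level IR above is the whole program at this point: one fused dispatch, matmul_dispatch_0, plus the public @matmul wrapper that imports the two buffer views, dispatches, and exports the f32 result. The three affine maps spell out C[d0, d1] += A[d0, d2] * B[d2, d1], with both f16 operands widened by arith.extf before the multiply-accumulate. A minimal NumPy sketch of the same semantics (a reference model only, not anything IREE executes):

import numpy as np

# Reference semantics of matmul_dispatch_0: f16 operands, f32 accumulation.
A = np.random.rand(2, 2816).astype(np.float16)     # %input0
B = np.random.rand(2816, 1280).astype(np.float16)  # %input1

C = np.zeros((2, 1280), dtype=np.float32)  # linalg.fill with 0.0 : f32
for i in range(C.shape[0]):      # d0: parallel
    for j in range(C.shape[1]):  # d1: parallel
        # d2: reduction. arith.extf each operand to f32, then mulf + addf.
        C[i, j] += (A[i, :].astype(np.float32) * B[:, j].astype(np.float32)).sum()

assert np.allclose(C, A.astype(np.float32) @ B.astype(np.float32), rtol=1e-3)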
// -----// IR Dump After InjectTensorTracingPass (iree-flow-inject-tensor-tracing) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = flow.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%0, %1) : (tensor<2x2816xf16>, tensor<2816x1280xf16>) -> tensor<2x1280xf32> | |
%3 = hal.tensor.export %2 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
// -----// IR Dump After CleanupTensorShapesPass (iree-flow-cleanup-tensor-shapes) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = flow.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%0, %1) : (tensor<2x2816xf16>, tensor<2816x1280xf16>) -> tensor<2x1280xf32> | |
%3 = hal.tensor.export %2 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
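The two dumps above (InjectTensorTracingPass, CleanupTensorShapesPass) reprint only @matmul; both appear to run at function scope and neither changes anything here. The export symbol is worth a closer look: it encodes the op kind (matmul_like), the MxNxK problem shape, and the operand/result element types. A quick parse of that suffix, assuming the naming scheme observed in this log rather than any stable API:

name = "matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32"
shape, dtypes = name.rsplit("_", 2)[-2:]
M, N, K = map(int, shape.split("x"))
print((M, N, K), dtypes.split("x"))  # (2, 1280, 2816) ['f16', 'f16', 'f32']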
// -----// IR Dump After OutlineConstantsPass (iree-flow-outline-constants) //----- // | |
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device | |
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_hip | |
flow.executable private @matmul_dispatch_0 { | |
flow.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>>, %arg1: !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>>, %arg2: !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = iree_tensor_ext.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%1 = iree_tensor_ext.dispatch.tensor.load %arg1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%2 = tensor.empty() : tensor<2x1280xf32> | |
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%4 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%5 = arith.extf %in : f16 to f32 | |
%6 = arith.extf %in_0 : f16 to f32 | |
%7 = arith.mulf %5, %6 : f32 | |
%8 = arith.addf %out, %7 : f32 | |
linalg.yield %8 : f32 | |
} -> tensor<2x1280xf32> | |
iree_tensor_ext.dispatch.tensor.store %4, %arg2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
} | |
} | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = flow.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%0, %1) : (tensor<2x2816xf16>, tensor<2816x1280xf16>) -> tensor<2x1280xf32> | |
%3 = hal.tensor.export %2 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
} | |
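OutlineConstantsPass also changes nothing: the only constant is the scalar f32 zero feeding linalg.fill, which stays inline. The reprinted #executable_target_rocm_hsaco_fb attribute lists the MFMA intrinsics gfx942 offers; for f16 inputs with f32 accumulation the plausible candidates are MFMA_F32_16x16x16_F16 and MFMA_F32_32x32x8_F16. A back-of-envelope count of intrinsic issues for this shape (the actual kernel configuration is chosen much later in codegen and is not visible in this part of the log):

import math

M, N, K = 2, 1280, 2816  # from the dispatch name
m, n, k = 16, 16, 16     # MFMA_F32_16x16x16_F16 tile shape
issues = math.ceil(M / m) * math.ceil(N / n) * math.ceil(K / k)
print(issues)            # 1 * 80 * 176 = 14080 intrinsic issues

Note that with M = 2, an intrinsic computing 16 output rows wastes 14 of them, which is why skinny matmuls like this one are often better served by non-MFMA strategies.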
// -----// IR Dump After OptimizeIntArithmeticPass (iree-util-optimize-int-arithmetic) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = flow.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%0, %1) : (tensor<2x2816xf16>, tensor<2816x1280xf16>) -> tensor<2x1280xf32> | |
%3 = hal.tensor.export %2 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = flow.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%0, %1) : (tensor<2x2816xf16>, tensor<2816x1280xf16>) -> tensor<2x1280xf32> | |
%3 = hal.tensor.export %2 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = flow.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%0, %1) : (tensor<2x2816xf16>, tensor<2816x1280xf16>) -> tensor<2x1280xf32> | |
%3 = hal.tensor.export %2 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
// -----// IR Dump After SimplifyGlobalAccessesPass (iree-util-simplify-global-accesses) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = flow.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%0, %1) : (tensor<2x2816xf16>, tensor<2816x1280xf16>) -> tensor<2x1280xf32> | |
%3 = hal.tensor.export %2 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
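The four function-scoped cleanups above (int-arithmetic optimization, canonicalization, CSE, global-access simplification) are all no-ops on this small program. The interesting part of @matmul is the ABI boundary itself: hal.tensor.import turns an untyped !hal.buffer_view into a typed tensor, and hal.tensor.export goes the other way. A hypothetical stand-in for the shape/dtype validation implied at import (illustrative only; the real runtime check is generated by IREE, not written like this):

import numpy as np

def import_buffer_view(array, shape, dtype):
    # Hypothetical model of the metadata check behind hal.tensor.import.
    if array.shape != shape or array.dtype != dtype:
        raise ValueError("ABI mismatch for incoming buffer view")
    return array

x = import_buffer_view(np.zeros((2, 2816), np.float16), (2, 2816), np.float16)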
// -----// IR Dump After ApplyPatternsPass (iree-util-apply-patterns) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = flow.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%0, %1) : (tensor<2x2816xf16>, tensor<2816x1280xf16>) -> tensor<2x1280xf32> | |
%3 = hal.tensor.export %2 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
// -----// IR Dump After FoldGlobalsPass (iree-util-fold-globals) //----- // | |
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device | |
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_hip | |
flow.executable private @matmul_dispatch_0 { | |
flow.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>>, %arg1: !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>>, %arg2: !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = iree_tensor_ext.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%1 = iree_tensor_ext.dispatch.tensor.load %arg1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%2 = tensor.empty() : tensor<2x1280xf32> | |
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%4 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%5 = arith.extf %in : f16 to f32 | |
%6 = arith.extf %in_0 : f16 to f32 | |
%7 = arith.mulf %5, %6 : f32 | |
%8 = arith.addf %out, %7 : f32 | |
linalg.yield %8 : f32 | |
} -> tensor<2x1280xf32> | |
iree_tensor_ext.dispatch.tensor.store %4, %arg2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
} | |
} | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = flow.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%0, %1) : (tensor<2x2816xf16>, tensor<2816x1280xf16>) -> tensor<2x1280xf32> | |
%3 = hal.tensor.export %2 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After FuseGlobalsPass (iree-util-fuse-globals) //----- // | |
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device | |
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_hip | |
flow.executable private @matmul_dispatch_0 { | |
flow.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>>, %arg1: !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>>, %arg2: !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = iree_tensor_ext.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%1 = iree_tensor_ext.dispatch.tensor.load %arg1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%2 = tensor.empty() : tensor<2x1280xf32> | |
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%4 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%5 = arith.extf %in : f16 to f32 | |
%6 = arith.extf %in_0 : f16 to f32 | |
%7 = arith.mulf %5, %6 : f32 | |
%8 = arith.addf %out, %7 : f32 | |
linalg.yield %8 : f32 | |
} -> tensor<2x1280xf32> | |
iree_tensor_ext.dispatch.tensor.store %4, %arg2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
} | |
} | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = flow.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%0, %1) : (tensor<2x2816xf16>, tensor<2816x1280xf16>) -> tensor<2x1280xf32> | |
%3 = hal.tensor.export %2 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After IPOPass (iree-util-ipo) //----- // | |
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device | |
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_hip | |
flow.executable private @matmul_dispatch_0 { | |
flow.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>>, %arg1: !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>>, %arg2: !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = iree_tensor_ext.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%1 = iree_tensor_ext.dispatch.tensor.load %arg1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%2 = tensor.empty() : tensor<2x1280xf32> | |
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%4 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%5 = arith.extf %in : f16 to f32 | |
%6 = arith.extf %in_0 : f16 to f32 | |
%7 = arith.mulf %5, %6 : f32 | |
%8 = arith.addf %out, %7 : f32 | |
linalg.yield %8 : f32 | |
} -> tensor<2x1280xf32> | |
iree_tensor_ext.dispatch.tensor.store %4, %arg2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
} | |
} | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = flow.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%0, %1) : (tensor<2x2816xf16>, tensor<2816x1280xf16>) -> tensor<2x1280xf32> | |
%3 = hal.tensor.export %2 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
} | |
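FoldGlobalsPass, FuseGlobalsPass, and IPOPass reprint the whole module without changing it; the fixed-point driver notices (the iree.fixedpoint.iteration attribute is gone in the next dump) and stops iterating. Meanwhile the workgroups() region still returns the symbolic iree_tensor_ext.dispatch.workgroup_count_from_slice; it only becomes a concrete grid once codegen picks tile sizes. Under a placeholder tiling (the values below are assumptions for illustration, not what IREE selects for this dispatch):

import math

M, N = 2, 1280          # output space of the dispatch
TILE_M, TILE_N = 2, 64  # hypothetical workgroup tile sizes
grid = (math.ceil(N / TILE_N), math.ceil(M / TILE_M), 1)
print(grid)             # (20, 1, 1) under this hypothetical tiling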
// -----// IR Dump After FixedPointIteratorPass (iree-util-fixed-point-iterator) //----- // | |
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_hip | |
flow.executable private @matmul_dispatch_0 { | |
flow.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>>, %arg1: !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>>, %arg2: !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = iree_tensor_ext.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%1 = iree_tensor_ext.dispatch.tensor.load %arg1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%2 = tensor.empty() : tensor<2x1280xf32> | |
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%4 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%5 = arith.extf %in : f16 to f32 | |
%6 = arith.extf %in_0 : f16 to f32 | |
%7 = arith.mulf %5, %6 : f32 | |
%8 = arith.addf %out, %7 : f32 | |
linalg.yield %8 : f32 | |
} -> tensor<2x1280xf32> | |
iree_tensor_ext.dispatch.tensor.store %4, %arg2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
} | |
} | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = flow.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%0, %1) : (tensor<2x2816xf16>, tensor<2816x1280xf16>) -> tensor<2x1280xf32> | |
%3 = hal.tensor.export %2 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After SymbolDCE (symbol-dce) //----- // | |
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_hip | |
flow.executable private @matmul_dispatch_0 { | |
flow.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>>, %arg1: !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>>, %arg2: !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = iree_tensor_ext.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%1 = iree_tensor_ext.dispatch.tensor.load %arg1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%2 = tensor.empty() : tensor<2x1280xf32> | |
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%4 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%5 = arith.extf %in : f16 to f32 | |
%6 = arith.extf %in_0 : f16 to f32 | |
%7 = arith.mulf %5, %6 : f32 | |
%8 = arith.addf %out, %7 : f32 | |
linalg.yield %8 : f32 | |
} -> tensor<2x1280xf32> | |
iree_tensor_ext.dispatch.tensor.store %4, %arg2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
} | |
} | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = flow.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%0, %1) : (tensor<2x2816xf16>, tensor<2816x1280xf16>) -> tensor<2x1280xf32> | |
%3 = hal.tensor.export %2 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
} | |
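SymbolDCE finds nothing dead, and the pipeline now enters the stream phase. Before it does, the reason the kernel widens to f32 at all is easy to demonstrate: accumulating 2816 f16 products in an f16 running sum loses low-order bits once the sum grows past a few hundred, where an f16 ulp is already 0.25 or more. A few lines of NumPy show the drift:

import numpy as np

rng = np.random.default_rng(0)
a = rng.random(2816).astype(np.float16)
b = rng.random(2816).astype(np.float16)

acc16 = np.float16(0.0)
for x, y in zip(a, b):
    acc16 = np.float16(acc16 + np.float16(x * y))  # f16 mul, f16 add

acc32 = float(a.astype(np.float32) @ b.astype(np.float32))
print(float(acc16), acc32)  # the f16 running sum visibly undershoots f32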
// -----// IR Dump After VerifyInputPass (iree-stream-verify-input) //----- // | |
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_hip | |
flow.executable private @matmul_dispatch_0 { | |
flow.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>>, %arg1: !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>>, %arg2: !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = iree_tensor_ext.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%1 = iree_tensor_ext.dispatch.tensor.load %arg1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%2 = tensor.empty() : tensor<2x1280xf32> | |
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%4 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%5 = arith.extf %in : f16 to f32 | |
%6 = arith.extf %in_0 : f16 to f32 | |
%7 = arith.mulf %5, %6 : f32 | |
%8 = arith.addf %out, %7 : f32 | |
linalg.yield %8 : f32 | |
} -> tensor<2x1280xf32> | |
iree_tensor_ext.dispatch.tensor.store %4, %arg2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
} | |
} | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = flow.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%0, %1) : (tensor<2x2816xf16>, tensor<2816x1280xf16>) -> tensor<2x1280xf32> | |
%3 = hal.tensor.export %2 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = flow.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%0, %1) : (tensor<2x2816xf16>, tensor<2816x1280xf16>) -> tensor<2x1280xf32> | |
%3 = hal.tensor.export %2 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = flow.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%0, %1) : (tensor<2x2816xf16>, tensor<2816x1280xf16>) -> tensor<2x1280xf32> | |
%3 = hal.tensor.export %2 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
// -----// IR Dump After OptimizeIntArithmeticPass (iree-util-optimize-int-arithmetic) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = flow.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%0, %1) : (tensor<2x2816xf16>, tensor<2816x1280xf16>) -> tensor<2x1280xf32> | |
%3 = hal.tensor.export %2 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
// -----// IR Dump After SimplifyGlobalAccessesPass (iree-util-simplify-global-accesses) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = flow.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%0, %1) : (tensor<2x2816xf16>, tensor<2816x1280xf16>) -> tensor<2x1280xf32> | |
%3 = hal.tensor.export %2 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
// -----// IR Dump After ApplyPatternsPass (iree-util-apply-patterns) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = flow.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%0, %1) : (tensor<2x2816xf16>, tensor<2816x1280xf16>) -> tensor<2x1280xf32> | |
%3 = hal.tensor.export %2 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
// -----// IR Dump After FoldGlobalsPass (iree-util-fold-globals) //----- // | |
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_hip | |
flow.executable private @matmul_dispatch_0 { | |
flow.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>>, %arg1: !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>>, %arg2: !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = iree_tensor_ext.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%1 = iree_tensor_ext.dispatch.tensor.load %arg1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%2 = tensor.empty() : tensor<2x1280xf32> | |
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%4 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%5 = arith.extf %in : f16 to f32 | |
%6 = arith.extf %in_0 : f16 to f32 | |
%7 = arith.mulf %5, %6 : f32 | |
%8 = arith.addf %out, %7 : f32 | |
linalg.yield %8 : f32 | |
} -> tensor<2x1280xf32> | |
iree_tensor_ext.dispatch.tensor.store %4, %arg2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
} | |
} | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = flow.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%0, %1) : (tensor<2x2816xf16>, tensor<2816x1280xf16>) -> tensor<2x1280xf32> | |
%3 = hal.tensor.export %2 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After FuseGlobalsPass (iree-util-fuse-globals) //----- // | |
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_hip | |
flow.executable private @matmul_dispatch_0 { | |
flow.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>>, %arg1: !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>>, %arg2: !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = iree_tensor_ext.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%1 = iree_tensor_ext.dispatch.tensor.load %arg1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%2 = tensor.empty() : tensor<2x1280xf32> | |
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%4 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%5 = arith.extf %in : f16 to f32 | |
%6 = arith.extf %in_0 : f16 to f32 | |
%7 = arith.mulf %5, %6 : f32 | |
%8 = arith.addf %out, %7 : f32 | |
linalg.yield %8 : f32 | |
} -> tensor<2x1280xf32> | |
iree_tensor_ext.dispatch.tensor.store %4, %arg2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
} | |
} | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = flow.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%0, %1) : (tensor<2x2816xf16>, tensor<2816x1280xf16>) -> tensor<2x1280xf32> | |
%3 = hal.tensor.export %2 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After IPOPass (iree-util-ipo) //----- // | |
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_hip | |
flow.executable private @matmul_dispatch_0 { | |
flow.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>>, %arg1: !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>>, %arg2: !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = iree_tensor_ext.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%1 = iree_tensor_ext.dispatch.tensor.load %arg1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%2 = tensor.empty() : tensor<2x1280xf32> | |
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%4 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%5 = arith.extf %in : f16 to f32 | |
%6 = arith.extf %in_0 : f16 to f32 | |
%7 = arith.mulf %5, %6 : f32 | |
%8 = arith.addf %out, %7 : f32 | |
linalg.yield %8 : f32 | |
} -> tensor<2x1280xf32> | |
iree_tensor_ext.dispatch.tensor.store %4, %arg2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
} | |
} | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = flow.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%0, %1) : (tensor<2x2816xf16>, tensor<2816x1280xf16>) -> tensor<2x1280xf32> | |
%3 = hal.tensor.export %2 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After CloneToConsumersPass (iree-stream-clone-to-consumers) //----- // | |
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_hip | |
flow.executable private @matmul_dispatch_0 { | |
flow.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>>, %arg1: !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>>, %arg2: !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = iree_tensor_ext.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%1 = iree_tensor_ext.dispatch.tensor.load %arg1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%2 = tensor.empty() : tensor<2x1280xf32> | |
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%4 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%0, %1 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%3 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%5 = arith.extf %in : f16 to f32 | |
%6 = arith.extf %in_0 : f16 to f32 | |
%7 = arith.mulf %5, %6 : f32 | |
%8 = arith.addf %out, %7 : f32 | |
linalg.yield %8 : f32 | |
} -> tensor<2x1280xf32> | |
iree_tensor_ext.dispatch.tensor.store %4, %arg2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
} | |
} | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<2x2816xf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<2816x1280xf16> | |
%2 = flow.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%0, %1) : (tensor<2x2816xf16>, tensor<2816x1280xf16>) -> tensor<2x1280xf32> | |
%3 = hal.tensor.export %2 "output0" : tensor<2x1280xf32> -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
} | |
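CloneToConsumersPass is the last flow-level no-op; the ConvertToStreamPass dump that follows finally changes the shape of the IR: the executable's tensor arguments become opaque !stream.binding values subspanned at byte offset %c0, and the host-side flow ops will be rewritten over sized stream resources. The byte sizes the three bindings must cover are plain arithmetic (f16 is 2 bytes per element, f32 is 4):

sizes_in_bytes = {
    "%arg0, 2x2816xf16":    2 * 2816 * 2,     # 11264
    "%arg1, 2816x1280xf16": 2816 * 1280 * 2,  # 7208960
    "%arg2, 2x1280xf32":    2 * 1280 * 4,     # 10240
}
for binding, nbytes in sizes_in_bytes.items():
    print(binding, "->", nbytes, "bytes")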
// -----// IR Dump After ConvertToStreamPass (iree-stream-conversion) //----- // | |
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_hip | |
stream.executable private @matmul_dispatch_0 { | |
stream.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
%cst = arith.constant 0.000000e+00 : f32 | |
%3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%5 = tensor.empty() : tensor<2x1280xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%7 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%3, %4 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%6 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%8 = arith.extf %in : f16 to f32 | |
%9 = arith.extf %in_0 : f16 to f32 | |
%10 = arith.mulf %8, %9 : f32 | |
%11 = arith.addf %out, %10 : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<2x1280xf32> | |
iree_tensor_ext.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
} | |
} | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
%c2 = arith.constant 2 : index | |
%c2816 = arith.constant 2816 : index | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<2x2816xf16> : index | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%0} | |
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0} | |
%element_type_f16_0 = hal.element_type<f16> : i32 | |
%dense_row_major_1 = hal.encoding_type<dense_row_major> : i32 | |
%c2816_2 = arith.constant 2816 : index | |
%c1280 = arith.constant 1280 : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816_2, %c1280]) type(%element_type_f16_0) encoding(%dense_row_major_1) | |
%3 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<2816x1280xf16> : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3} | |
%6 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<2x1280xf32> : index | |
%7 = stream.tensor.dispatch on(#hal.device.affinity<@__device_0>) @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%2, %5) : (tensor<2x2816xf16> in !stream.resource<*>{%0}, tensor<2816x1280xf16> in !stream.resource<*>{%3}) -> tensor<2x1280xf32> in !stream.resource<*>{%6} | |
%8 = stream.async.transfer %7 : !stream.resource<*>{%6} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%6} | |
%9 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %8 : tensor<2x1280xf32> in !stream.resource<external>{%6} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
} | |
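// Note: ConvertToStreamPass (dumped above) rewrote the earlier flow ops into the | |
// stream dialect: flow.executable became stream.executable, and every tensor now | |
// lives in a !stream.resource whose byte size comes from stream.tensor.sizeof. For | |
// these static shapes the sizes are simple products: | |
//   tensor<2x2816xf16>    -> 2 * 2816 * 2 bytes = 11,264 | |
//   tensor<2816x1280xf16> -> 2816 * 1280 * 2 bytes = 7,208,960 | |
//   tensor<2x1280xf32>    -> 2 * 1280 * 4 bytes = 10,240 | |
// The stream.tensor.import / stream.async.transfer pairs move external | |
// !hal.buffer_view storage into !stream.resource<*> values whose lifetime is | |
// refined by later stream passes. | |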
// -----// IR Dump After VerifyLoweringToTensorsPass (iree-stream-verify-lowering-to-tensors) //----- // | |
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_hip | |
stream.executable private @matmul_dispatch_0 { | |
stream.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
%cst = arith.constant 0.000000e+00 : f32 | |
%3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%5 = tensor.empty() : tensor<2x1280xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%7 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%3, %4 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%6 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%8 = arith.extf %in : f16 to f32 | |
%9 = arith.extf %in_0 : f16 to f32 | |
%10 = arith.mulf %8, %9 : f32 | |
%11 = arith.addf %out, %10 : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<2x1280xf32> | |
iree_tensor_ext.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
} | |
} | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
%c2 = arith.constant 2 : index | |
%c2816 = arith.constant 2816 : index | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<2x2816xf16> : index | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%0} | |
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0} | |
%element_type_f16_0 = hal.element_type<f16> : i32 | |
%dense_row_major_1 = hal.encoding_type<dense_row_major> : i32 | |
%c2816_2 = arith.constant 2816 : index | |
%c1280 = arith.constant 1280 : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816_2, %c1280]) type(%element_type_f16_0) encoding(%dense_row_major_1) | |
%3 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<2816x1280xf16> : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3} | |
%6 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<2x1280xf32> : index | |
%7 = stream.tensor.dispatch on(#hal.device.affinity<@__device_0>) @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%2, %5) : (tensor<2x2816xf16> in !stream.resource<*>{%0}, tensor<2816x1280xf16> in !stream.resource<*>{%3}) -> tensor<2x1280xf32> in !stream.resource<*>{%6} | |
%8 = stream.async.transfer %7 : !stream.resource<*>{%6} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%6} | |
%9 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %8 : tensor<2x1280xf32> in !stream.resource<external>{%6} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
} | |
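// Note: VerifyLoweringToTensorsPass only checks invariants; the dump above is | |
// identical to the ConvertToStreamPass output, confirming the IR was not modified. | |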
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
%3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%5 = tensor.empty() : tensor<2x1280xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%7 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%3, %4 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%6 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%8 = arith.extf %in : f16 to f32 | |
%9 = arith.extf %in_0 : f16 to f32 | |
%10 = arith.mulf %8, %9 : f32 | |
%11 = arith.addf %out, %10 : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<2x1280xf32> | |
iree_tensor_ext.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
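// Note: canonicalization hoisted the two constants to the top of the dispatch | |
// function and printed the indexing maps inline on linalg.generic; the #map aliases | |
// exist only at module scope, and this dump shows the function in isolation. | |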
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<2x2816xf16> : index | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%0} | |
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0} | |
%element_type_f16_0 = hal.element_type<f16> : i32 | |
%dense_row_major_1 = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16_0) encoding(%dense_row_major_1) | |
%3 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<2816x1280xf16> : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3} | |
%6 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<2x1280xf32> : index | |
%7 = stream.tensor.dispatch on(#hal.device.affinity<@__device_0>) @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%2, %5) : (tensor<2x2816xf16> in !stream.resource<*>{%0}, tensor<2816x1280xf16> in !stream.resource<*>{%3}) -> tensor<2x1280xf32> in !stream.resource<*>{%6} | |
%8 = stream.async.transfer %7 : !stream.resource<*>{%6} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%6} | |
%9 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %8 : tensor<2x1280xf32> in !stream.resource<external>{%6} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
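// Note: canonicalizing the ABI wrapper merged the duplicated index constants | |
// (%c2816 and %c1280 are now defined once at the top), though the second | |
// element-type and encoding constants survive until CSE below. | |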
// -----// IR Dump After Inliner (inline) //----- // | |
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_hip | |
stream.executable private @matmul_dispatch_0 { | |
stream.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
%3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%5 = tensor.empty() : tensor<2x1280xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%7 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%3, %4 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%6 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%8 = arith.extf %in : f16 to f32 | |
%9 = arith.extf %in_0 : f16 to f32 | |
%10 = arith.mulf %8, %9 : f32 | |
%11 = arith.addf %out, %10 : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<2x1280xf32> | |
iree_tensor_ext.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
} | |
} | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<2x2816xf16> : index | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%0} | |
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0} | |
%element_type_f16_0 = hal.element_type<f16> : i32 | |
%dense_row_major_1 = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16_0) encoding(%dense_row_major_1) | |
%3 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<2816x1280xf16> : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3} | |
%6 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<2x1280xf32> : index | |
%7 = stream.tensor.dispatch on(#hal.device.affinity<@__device_0>) @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%2, %5) : (tensor<2x2816xf16> in !stream.resource<*>{%0}, tensor<2816x1280xf16> in !stream.resource<*>{%3}) -> tensor<2x1280xf32> in !stream.resource<*>{%6} | |
%8 = stream.async.transfer %7 : !stream.resource<*>{%6} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%6} | |
%9 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %8 : tensor<2x1280xf32> in !stream.resource<external>{%6} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
} | |
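// Note: the Inliner re-prints the full module but finds no calls to inline; the | |
// dispatch entry point is referenced symbolically by stream.tensor.dispatch rather | |
// than called directly. | |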
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<2x2816xf16> : index | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%0} | |
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0} | |
%element_type_f16_0 = hal.element_type<f16> : i32 | |
%dense_row_major_1 = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16_0) encoding(%dense_row_major_1) | |
%3 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<2816x1280xf16> : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3} | |
%6 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<2x1280xf32> : index | |
%7 = stream.tensor.dispatch on(#hal.device.affinity<@__device_0>) @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%2, %5) : (tensor<2x2816xf16> in !stream.resource<*>{%0}, tensor<2816x1280xf16> in !stream.resource<*>{%3}) -> tensor<2x1280xf32> in !stream.resource<*>{%6} | |
%8 = stream.async.transfer %7 : !stream.resource<*>{%6} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%6} | |
%9 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %8 : tensor<2x1280xf32> in !stream.resource<external>{%6} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<2x2816xf16> : index | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%0} | |
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%3 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<2816x1280xf16> : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3} | |
%6 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<2x1280xf32> : index | |
%7 = stream.tensor.dispatch on(#hal.device.affinity<@__device_0>) @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%2, %5) : (tensor<2x2816xf16> in !stream.resource<*>{%0}, tensor<2816x1280xf16> in !stream.resource<*>{%3}) -> tensor<2x1280xf32> in !stream.resource<*>{%6} | |
%8 = stream.async.transfer %7 : !stream.resource<*>{%6} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%6} | |
%9 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %8 : tensor<2x1280xf32> in !stream.resource<external>{%6} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
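// Note: CSE removed the duplicate hal.element_type<f16> and | |
// hal.encoding_type<dense_row_major> constants, so the assert on input1 now reuses | |
// %element_type_f16 and %dense_row_major from the input0 checks. | |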
// -----// IR Dump After OptimizeIntArithmeticPass (iree-util-optimize-int-arithmetic) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<2x2816xf16> : index | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%0} | |
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%3 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<2816x1280xf16> : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3} | |
%6 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<2x1280xf32> : index | |
%7 = stream.tensor.dispatch on(#hal.device.affinity<@__device_0>) @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%2, %5) : (tensor<2x2816xf16> in !stream.resource<*>{%0}, tensor<2816x1280xf16> in !stream.resource<*>{%3}) -> tensor<2x1280xf32> in !stream.resource<*>{%6} | |
%8 = stream.async.transfer %7 : !stream.resource<*>{%6} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%6} | |
%9 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %8 : tensor<2x1280xf32> in !stream.resource<external>{%6} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
// -----// IR Dump After SimplifyGlobalAccessesPass (iree-util-simplify-global-accesses) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<2x2816xf16> : index | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%0} | |
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%3 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<2816x1280xf16> : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3} | |
%6 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<2x1280xf32> : index | |
%7 = stream.tensor.dispatch on(#hal.device.affinity<@__device_0>) @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%2, %5) : (tensor<2x2816xf16> in !stream.resource<*>{%0}, tensor<2816x1280xf16> in !stream.resource<*>{%3}) -> tensor<2x1280xf32> in !stream.resource<*>{%6} | |
%8 = stream.async.transfer %7 : !stream.resource<*>{%6} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%6} | |
%9 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %8 : tensor<2x1280xf32> in !stream.resource<external>{%6} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
// -----// IR Dump After ApplyPatternsPass (iree-util-apply-patterns) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<2x2816xf16> : index | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%0} | |
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%3 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<2816x1280xf16> : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3} | |
%6 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<2x1280xf32> : index | |
%7 = stream.tensor.dispatch on(#hal.device.affinity<@__device_0>) @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%2, %5) : (tensor<2x2816xf16> in !stream.resource<*>{%0}, tensor<2816x1280xf16> in !stream.resource<*>{%3}) -> tensor<2x1280xf32> in !stream.resource<*>{%6} | |
%8 = stream.async.transfer %7 : !stream.resource<*>{%6} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%6} | |
%9 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %8 : tensor<2x1280xf32> in !stream.resource<external>{%6} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
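// Note: the OptimizeIntArithmetic, SimplifyGlobalAccesses, and ApplyPatterns dumps | |
// above are unchanged from the CSE result; with static shapes and a single device | |
// global there is nothing left for these passes to rewrite in this module. | |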
// -----// IR Dump After FoldGlobalsPass (iree-util-fold-globals) //----- // | |
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_hip | |
stream.executable private @matmul_dispatch_0 { | |
stream.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
%3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%5 = tensor.empty() : tensor<2x1280xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%7 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%3, %4 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%6 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%8 = arith.extf %in : f16 to f32 | |
%9 = arith.extf %in_0 : f16 to f32 | |
%10 = arith.mulf %8, %9 : f32 | |
%11 = arith.addf %out, %10 : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<2x1280xf32> | |
iree_tensor_ext.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
} | |
} | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<2x2816xf16> : index | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%0} | |
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%3 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<2816x1280xf16> : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3} | |
%6 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<2x1280xf32> : index | |
%7 = stream.tensor.dispatch on(#hal.device.affinity<@__device_0>) @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%2, %5) : (tensor<2x2816xf16> in !stream.resource<*>{%0}, tensor<2816x1280xf16> in !stream.resource<*>{%3}) -> tensor<2x1280xf32> in !stream.resource<*>{%6} | |
%8 = stream.async.transfer %7 : !stream.resource<*>{%6} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%6} | |
%9 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %8 : tensor<2x1280xf32> in !stream.resource<external>{%6} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After FuseGlobalsPass (iree-util-fuse-globals) //----- // | |
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_hip | |
stream.executable private @matmul_dispatch_0 { | |
stream.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
%3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%5 = tensor.empty() : tensor<2x1280xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%7 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%3, %4 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%6 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%8 = arith.extf %in : f16 to f32 | |
%9 = arith.extf %in_0 : f16 to f32 | |
%10 = arith.mulf %8, %9 : f32 | |
%11 = arith.addf %out, %10 : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<2x1280xf32> | |
iree_tensor_ext.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
} | |
} | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<2x2816xf16> : index | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%0} | |
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%3 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<2816x1280xf16> : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3} | |
%6 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<2x1280xf32> : index | |
%7 = stream.tensor.dispatch on(#hal.device.affinity<@__device_0>) @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%2, %5) : (tensor<2x2816xf16> in !stream.resource<*>{%0}, tensor<2816x1280xf16> in !stream.resource<*>{%3}) -> tensor<2x1280xf32> in !stream.resource<*>{%6} | |
%8 = stream.async.transfer %7 : !stream.resource<*>{%6} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%6} | |
%9 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %8 : tensor<2x1280xf32> in !stream.resource<external>{%6} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After IPOPass (iree-util-ipo) //----- // | |
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_hip | |
stream.executable private @matmul_dispatch_0 { | |
stream.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
%3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%5 = tensor.empty() : tensor<2x1280xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%7 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%3, %4 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%6 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%8 = arith.extf %in : f16 to f32 | |
%9 = arith.extf %in_0 : f16 to f32 | |
%10 = arith.mulf %8, %9 : f32 | |
%11 = arith.addf %out, %10 : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<2x1280xf32> | |
iree_tensor_ext.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
} | |
} | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<2x2816xf16> : index | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%0} | |
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%3 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<2816x1280xf16> : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3} | |
%6 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<2x1280xf32> : index | |
%7 = stream.tensor.dispatch on(#hal.device.affinity<@__device_0>) @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%2, %5) : (tensor<2x2816xf16> in !stream.resource<*>{%0}, tensor<2816x1280xf16> in !stream.resource<*>{%3}) -> tensor<2x1280xf32> in !stream.resource<*>{%6} | |
%8 = stream.async.transfer %7 : !stream.resource<*>{%6} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%6} | |
%9 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %8 : tensor<2x1280xf32> in !stream.resource<external>{%6} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After CombineInitializersPass (iree-util-combine-initializers) //----- // | |
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_hip | |
stream.executable private @matmul_dispatch_0 { | |
stream.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
%3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%5 = tensor.empty() : tensor<2x1280xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%7 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%3, %4 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%6 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%8 = arith.extf %in : f16 to f32 | |
%9 = arith.extf %in_0 : f16 to f32 | |
%10 = arith.mulf %8, %9 : f32 | |
%11 = arith.addf %out, %10 : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<2x1280xf32> | |
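// [annotation] The generic above reads as C[d0, d1] += extf(A[d0, d2]) * extf(B[d2, d1]) | |
// for d2 in [0, 2816): an f16 x f16 matmul accumulated in f32, which is why the dispatch | |
// is named matmul_like_2x1280x2816_f16xf16xf32. A rough named-op equivalent (a sketch, | |
// not compiler output) would be: | |
//   linalg.matmul ins(%3, %4 : tensor<2x2816xf16>, tensor<2816x1280xf16>) | |
//                 outs(%6 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |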
iree_tensor_ext.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
} | |
} | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
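// [annotation] This is the public ABI wrapper: it asserts the shape/element type of each | |
// incoming !hal.buffer_view, imports it as an external stream resource, transfers it to | |
// the device affinity, runs the dispatch, and exports the result back to a buffer view. | |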
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<2x2816xf16> : index | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%0} | |
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%3 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<2816x1280xf16> : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3} | |
%6 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<2x1280xf32> : index | |
%7 = stream.tensor.dispatch on(#hal.device.affinity<@__device_0>) @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%2, %5) : (tensor<2x2816xf16> in !stream.resource<*>{%0}, tensor<2816x1280xf16> in !stream.resource<*>{%3}) -> tensor<2x1280xf32> in !stream.resource<*>{%6} | |
%8 = stream.async.transfer %7 : !stream.resource<*>{%6} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%6} | |
%9 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %8 : tensor<2x1280xf32> in !stream.resource<external>{%6} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After SpecializeEncodingsPass (iree-stream-specialize-encodings) //----- // | |
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_hip | |
stream.executable private @matmul_dispatch_0 { | |
stream.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
%3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%5 = tensor.empty() : tensor<2x1280xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%7 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%3, %4 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%6 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%8 = arith.extf %in : f16 to f32 | |
%9 = arith.extf %in_0 : f16 to f32 | |
%10 = arith.mulf %8, %9 : f32 | |
%11 = arith.addf %out, %10 : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<2x1280xf32> | |
iree_tensor_ext.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
} | |
} | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<2x2816xf16> : index | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%0} | |
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%3 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<2816x1280xf16> : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3} | |
%6 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<2x1280xf32> : index | |
%7 = stream.tensor.dispatch on(#hal.device.affinity<@__device_0>) @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%2, %5) : (tensor<2x2816xf16> in !stream.resource<*>{%0}, tensor<2816x1280xf16> in !stream.resource<*>{%3}) -> tensor<2x1280xf32> in !stream.resource<*>{%6} | |
%8 = stream.async.transfer %7 : !stream.resource<*>{%6} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%6} | |
%9 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %8 : tensor<2x1280xf32> in !stream.resource<external>{%6} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After EncodeHostTensorsPass (iree-stream-encode-host-tensors) //----- // | |
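// [annotation] This is where the symbolic stream.tensor.sizeof values become constants; | |
// for dense row-major tensors the byte size is element count times element size: | |
//   tensor<2x2816xf16>    -> 2 * 2816 * 2    = 11264   (%c11264) | |
//   tensor<2816x1280xf16> -> 2816 * 1280 * 2 = 7208960 (%c7208960) | |
//   tensor<2x1280xf32>    -> 2 * 1280 * 4    = 10240   (%c10240) | |
// stream.tensor.dispatch is also rewritten to stream.async.dispatch with explicit | |
// [offset to end for length] byte ranges on each operand. | |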
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
%1 = stream.async.transfer %0 : !stream.resource<external>{%c11264} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%3 = stream.async.transfer %2 : !stream.resource<external>{%c7208960} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c7208960} | |
%4 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%1[%c0 to %c11264 for %c11264], %3[%c0 to %c7208960 for %c7208960]) : (!stream.resource<*>{%c11264}, !stream.resource<*>{%c7208960}) -> !stream.resource<*>{%c10240} | |
%5 = stream.async.transfer %4 : !stream.resource<*>{%c10240} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c10240} | |
%6 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %5 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %6 : !hal.buffer_view | |
} | |
// -----// IR Dump After EncodeDeviceTensorsPass (iree-stream-encode-device-tensors) //----- // | |
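// [annotation] None of the tensors in this executable carry an #iree_encoding attribute, | |
// so the pass leaves the body untouched; the indexing maps are simply printed inline | |
// below because the executable is dumped without the surrounding module's #map aliases. | |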
stream.executable private @matmul_dispatch_0 { | |
stream.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
%3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%5 = tensor.empty() : tensor<2x1280xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%7 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%3, %4 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%6 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%8 = arith.extf %in : f16 to f32 | |
%9 = arith.extf %in_0 : f16 to f32 | |
%10 = arith.mulf %8, %9 : f32 | |
%11 = arith.addf %out, %10 : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<2x1280xf32> | |
iree_tensor_ext.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
} | |
} | |
// -----// IR Dump After MaterializeEncodingsPass (iree-stream-materialize-encodings) //----- // | |
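// [annotation] With no encodings to materialize, the module below is unchanged: it is the | |
// post-encoding IR reassembled, with the folded byte sizes in the host function and the | |
// original executable body. | |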
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_hip | |
stream.executable private @matmul_dispatch_0 { | |
stream.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
%3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%5 = tensor.empty() : tensor<2x1280xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%7 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%3, %4 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%6 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%8 = arith.extf %in : f16 to f32 | |
%9 = arith.extf %in_0 : f16 to f32 | |
%10 = arith.mulf %8, %9 : f32 | |
%11 = arith.addf %out, %10 : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<2x1280xf32> | |
iree_tensor_ext.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
} | |
} | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
%1 = stream.async.transfer %0 : !stream.resource<external>{%c11264} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%3 = stream.async.transfer %2 : !stream.resource<external>{%c7208960} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c7208960} | |
%4 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%1[%c0 to %c11264 for %c11264], %3[%c0 to %c7208960 for %c7208960]) : (!stream.resource<*>{%c11264}, !stream.resource<*>{%c7208960}) -> !stream.resource<*>{%c10240} | |
%5 = stream.async.transfer %4 : !stream.resource<*>{%c10240} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c10240} | |
%6 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %5 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %6 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
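// [annotation] The function is already in canonical form, so this canonicalize dump and | |
// the cse dump after it are identical: all constants are unique and every value has a | |
// single definition. | |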
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
%1 = stream.async.transfer %0 : !stream.resource<external>{%c11264} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%3 = stream.async.transfer %2 : !stream.resource<external>{%c7208960} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c7208960} | |
%4 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%1[%c0 to %c11264 for %c11264], %3[%c0 to %c7208960 for %c7208960]) : (!stream.resource<*>{%c11264}, !stream.resource<*>{%c7208960}) -> !stream.resource<*>{%c10240} | |
%5 = stream.async.transfer %4 : !stream.resource<*>{%c10240} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c10240} | |
%6 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %5 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %6 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
%1 = stream.async.transfer %0 : !stream.resource<external>{%c11264} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%3 = stream.async.transfer %2 : !stream.resource<external>{%c7208960} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c7208960} | |
%4 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%1[%c0 to %c11264 for %c11264], %3[%c0 to %c7208960 for %c7208960]) : (!stream.resource<*>{%c11264}, !stream.resource<*>{%c7208960}) -> !stream.resource<*>{%c10240} | |
%5 = stream.async.transfer %4 : !stream.resource<*>{%c10240} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c10240} | |
%6 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %5 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %6 : !hal.buffer_view | |
} | |
// -----// IR Dump After OptimizeIntArithmeticPass (iree-util-optimize-int-arithmetic) //----- // | |
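// [annotation] This and the following util-level cleanups (simplify-global-accesses, | |
// apply-patterns, fold-globals, fuse-globals, ipo) find nothing to change in this | |
// single-dispatch, single-global module; their dumps below are all identical. | |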
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
%1 = stream.async.transfer %0 : !stream.resource<external>{%c11264} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%3 = stream.async.transfer %2 : !stream.resource<external>{%c7208960} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c7208960} | |
%4 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%1[%c0 to %c11264 for %c11264], %3[%c0 to %c7208960 for %c7208960]) : (!stream.resource<*>{%c11264}, !stream.resource<*>{%c7208960}) -> !stream.resource<*>{%c10240} | |
%5 = stream.async.transfer %4 : !stream.resource<*>{%c10240} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c10240} | |
%6 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %5 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %6 : !hal.buffer_view | |
} | |
// -----// IR Dump After SimplifyGlobalAccessesPass (iree-util-simplify-global-accesses) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
%1 = stream.async.transfer %0 : !stream.resource<external>{%c11264} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%3 = stream.async.transfer %2 : !stream.resource<external>{%c7208960} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c7208960} | |
%4 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%1[%c0 to %c11264 for %c11264], %3[%c0 to %c7208960 for %c7208960]) : (!stream.resource<*>{%c11264}, !stream.resource<*>{%c7208960}) -> !stream.resource<*>{%c10240} | |
%5 = stream.async.transfer %4 : !stream.resource<*>{%c10240} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c10240} | |
%6 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %5 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %6 : !hal.buffer_view | |
} | |
// -----// IR Dump After ApplyPatternsPass (iree-util-apply-patterns) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
%1 = stream.async.transfer %0 : !stream.resource<external>{%c11264} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%3 = stream.async.transfer %2 : !stream.resource<external>{%c7208960} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c7208960} | |
%4 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%1[%c0 to %c11264 for %c11264], %3[%c0 to %c7208960 for %c7208960]) : (!stream.resource<*>{%c11264}, !stream.resource<*>{%c7208960}) -> !stream.resource<*>{%c10240} | |
%5 = stream.async.transfer %4 : !stream.resource<*>{%c10240} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c10240} | |
%6 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %5 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %6 : !hal.buffer_view | |
} | |
// -----// IR Dump After FoldGlobalsPass (iree-util-fold-globals) //----- // | |
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_hip | |
stream.executable private @matmul_dispatch_0 { | |
stream.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
%3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%5 = tensor.empty() : tensor<2x1280xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%7 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%3, %4 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%6 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%8 = arith.extf %in : f16 to f32 | |
%9 = arith.extf %in_0 : f16 to f32 | |
%10 = arith.mulf %8, %9 : f32 | |
%11 = arith.addf %out, %10 : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<2x1280xf32> | |
iree_tensor_ext.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
} | |
} | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
%1 = stream.async.transfer %0 : !stream.resource<external>{%c11264} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%3 = stream.async.transfer %2 : !stream.resource<external>{%c7208960} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c7208960} | |
%4 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%1[%c0 to %c11264 for %c11264], %3[%c0 to %c7208960 for %c7208960]) : (!stream.resource<*>{%c11264}, !stream.resource<*>{%c7208960}) -> !stream.resource<*>{%c10240} | |
%5 = stream.async.transfer %4 : !stream.resource<*>{%c10240} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c10240} | |
%6 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %5 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %6 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After FuseGlobalsPass (iree-util-fuse-globals) //----- // | |
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_hip | |
stream.executable private @matmul_dispatch_0 { | |
stream.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
%3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%5 = tensor.empty() : tensor<2x1280xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%7 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%3, %4 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%6 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%8 = arith.extf %in : f16 to f32 | |
%9 = arith.extf %in_0 : f16 to f32 | |
%10 = arith.mulf %8, %9 : f32 | |
%11 = arith.addf %out, %10 : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<2x1280xf32> | |
iree_tensor_ext.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
} | |
} | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
%1 = stream.async.transfer %0 : !stream.resource<external>{%c11264} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%3 = stream.async.transfer %2 : !stream.resource<external>{%c7208960} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c7208960} | |
%4 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%1[%c0 to %c11264 for %c11264], %3[%c0 to %c7208960 for %c7208960]) : (!stream.resource<*>{%c11264}, !stream.resource<*>{%c7208960}) -> !stream.resource<*>{%c10240} | |
%5 = stream.async.transfer %4 : !stream.resource<*>{%c10240} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c10240} | |
%6 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %5 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %6 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After IPOPass (iree-util-ipo) //----- // | |
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_hip | |
stream.executable private @matmul_dispatch_0 { | |
stream.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
%3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%5 = tensor.empty() : tensor<2x1280xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%7 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%3, %4 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%6 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%8 = arith.extf %in : f16 to f32 | |
%9 = arith.extf %in_0 : f16 to f32 | |
%10 = arith.mulf %8, %9 : f32 | |
%11 = arith.addf %out, %10 : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<2x1280xf32> | |
iree_tensor_ext.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
} | |
} | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
%1 = stream.async.transfer %0 : !stream.resource<external>{%c11264} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%3 = stream.async.transfer %2 : !stream.resource<external>{%c7208960} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c7208960} | |
%4 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%1[%c0 to %c11264 for %c11264], %3[%c0 to %c7208960 for %c7208960]) : (!stream.resource<*>{%c11264}, !stream.resource<*>{%c7208960}) -> !stream.resource<*>{%c10240} | |
%5 = stream.async.transfer %4 : !stream.resource<*>{%c10240} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c10240} | |
%6 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %5 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %6 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After VerifyLoweringToAsyncResourcesPass (iree-stream-verify-lowering-to-async-resources) //----- // | |
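// [annotation] Verification-only pass: it checks that everything flowing between ops is | |
// now a sized !stream.resource value rather than a tensor, and emits no changes. | |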
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_hip | |
stream.executable private @matmul_dispatch_0 { | |
stream.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
%3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%5 = tensor.empty() : tensor<2x1280xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%7 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%3, %4 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%6 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%8 = arith.extf %in : f16 to f32 | |
%9 = arith.extf %in_0 : f16 to f32 | |
%10 = arith.mulf %8, %9 : f32 | |
%11 = arith.addf %out, %10 : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<2x1280xf32> | |
iree_tensor_ext.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
} | |
} | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
%1 = stream.async.transfer %0 : !stream.resource<external>{%c11264} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%3 = stream.async.transfer %2 : !stream.resource<external>{%c7208960} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c7208960} | |
%4 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%1[%c0 to %c11264 for %c11264], %3[%c0 to %c7208960 for %c7208960]) : (!stream.resource<*>{%c11264}, !stream.resource<*>{%c7208960}) -> !stream.resource<*>{%c10240} | |
%5 = stream.async.transfer %4 : !stream.resource<*>{%c10240} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c10240} | |
%6 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %5 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %6 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After MaterializeCopyOnWritePass (iree-stream-materialize-copy-on-write) //----- // | |
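// [annotation] Copy-on-write materialization would insert stream.async.clone where a | |
// resource is written while still aliased; the dispatch here writes only a freshly | |
// allocated result, so no clones are needed and the function is unchanged. | |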
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
%1 = stream.async.transfer %0 : !stream.resource<external>{%c11264} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%3 = stream.async.transfer %2 : !stream.resource<external>{%c7208960} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c7208960} | |
%4 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%1[%c0 to %c11264 for %c11264], %3[%c0 to %c7208960 for %c7208960]) : (!stream.resource<*>{%c11264}, !stream.resource<*>{%c7208960}) -> !stream.resource<*>{%c10240} | |
%5 = stream.async.transfer %4 : !stream.resource<*>{%c10240} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c10240} | |
%6 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %5 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %6 : !hal.buffer_view | |
} | |
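// Note: MaterializeCopyOnWrite leaves the function untouched; with a single dispatch and no in-place tensor updates there is nothing to clone, so no copy-on-write ops appear. | |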
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
%1 = stream.async.transfer %0 : !stream.resource<external>{%c11264} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%3 = stream.async.transfer %2 : !stream.resource<external>{%c7208960} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c7208960} | |
%4 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%1[%c0 to %c11264 for %c11264], %3[%c0 to %c7208960 for %c7208960]) : (!stream.resource<*>{%c11264}, !stream.resource<*>{%c7208960}) -> !stream.resource<*>{%c10240} | |
%5 = stream.async.transfer %4 : !stream.resource<*>{%c10240} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c10240} | |
%6 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %5 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %6 : !hal.buffer_view | |
} | |
// -----// IR Dump After ElideAsyncCopiesPass (iree-stream-elide-async-copies) //----- // | |
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_hip | |
stream.executable private @matmul_dispatch_0 { | |
stream.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
%3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%5 = tensor.empty() : tensor<2x1280xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%7 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%3, %4 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%6 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%8 = arith.extf %in : f16 to f32 | |
%9 = arith.extf %in_0 : f16 to f32 | |
%10 = arith.mulf %8, %9 : f32 | |
%11 = arith.addf %out, %10 : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<2x1280xf32> | |
iree_tensor_ext.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
} | |
} | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
%1 = stream.async.transfer %0 : !stream.resource<external>{%c11264} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%3 = stream.async.transfer %2 : !stream.resource<external>{%c7208960} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c7208960} | |
%4 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%1[%c0 to %c11264 for %c11264], %3[%c0 to %c7208960 for %c7208960]) : (!stream.resource<*>{%c11264}, !stream.resource<*>{%c7208960}) -> !stream.resource<*>{%c10240} | |
%5 = stream.async.transfer %4 : !stream.resource<*>{%c10240} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c10240} | |
%6 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %5 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %6 : !hal.buffer_view | |
} | |
} | |
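// Note: the three stream.async.transfer ops above move data between identical affinities (from and to @__device_0). They survive ElideAsyncCopies here and are only dropped once RefineUsage (below) resolves the unrefined !stream.resource<*> types. | |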
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
%1 = stream.async.transfer %0 : !stream.resource<external>{%c11264} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%3 = stream.async.transfer %2 : !stream.resource<external>{%c7208960} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c7208960} | |
%4 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%1[%c0 to %c11264 for %c11264], %3[%c0 to %c7208960 for %c7208960]) : (!stream.resource<*>{%c11264}, !stream.resource<*>{%c7208960}) -> !stream.resource<*>{%c10240} | |
%5 = stream.async.transfer %4 : !stream.resource<*>{%c10240} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c10240} | |
%6 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %5 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %6 : !hal.buffer_view | |
} | |
// -----// IR Dump After EmplaceAllocationsPass (iree-stream-emplace-allocations) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
%1 = stream.async.transfer %0 : !stream.resource<external>{%c11264} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%3 = stream.async.transfer %2 : !stream.resource<external>{%c7208960} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c7208960} | |
%4 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%1[%c0 to %c11264 for %c11264], %3[%c0 to %c7208960 for %c7208960]) : (!stream.resource<*>{%c11264}, !stream.resource<*>{%c7208960}) -> !stream.resource<*>{%c10240} | |
%5 = stream.async.transfer %4 : !stream.resource<*>{%c10240} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c10240} | |
%6 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %5 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %6 : !hal.buffer_view | |
} | |
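// Note: EmplaceAllocations would place dispatch results directly into consumer-provided storage where possible; presumably nothing qualifies in this module, as the function is unchanged. | |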
// -----// IR Dump After RefineUsagePass (iree-stream-refine-usage) //----- // | |
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_hip | |
stream.executable private @matmul_dispatch_0 { | |
stream.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
%3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%5 = tensor.empty() : tensor<2x1280xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%7 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%3, %4 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%6 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%8 = arith.extf %in : f16 to f32 | |
%9 = arith.extf %in_0 : f16 to f32 | |
%10 = arith.mulf %8, %9 : f32 | |
%11 = arith.addf %out, %10 : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<2x1280xf32> | |
iree_tensor_ext.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
} | |
} | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%2 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%0[%c0 to %c11264 for %c11264], %1[%c0 to %c7208960 for %c7208960]) : (!stream.resource<external>{%c11264}, !stream.resource<external>{%c7208960}) -> !stream.resource<external>{%c10240} | |
%3 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %2 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
} | |
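// Note: RefineUsage is the significant change in this stretch: every !stream.resource<*> is refined to !stream.resource<external>, the same-device transfers are elided, and the dispatch now consumes the imported buffers directly. | |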
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%2 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%0[%c0 to %c11264 for %c11264], %1[%c0 to %c7208960 for %c7208960]) : (!stream.resource<external>{%c11264}, !stream.resource<external>{%c7208960}) -> !stream.resource<external>{%c10240} | |
%3 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %2 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
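// Note: the cleanup sweep that follows (cse, int-arithmetic optimization, global access simplification, pattern application, global folding/fusing, IPO) finds nothing further to change in this small module; the dumps below are identical apart from whether the whole module or only the public function is printed. | |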
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%2 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%0[%c0 to %c11264 for %c11264], %1[%c0 to %c7208960 for %c7208960]) : (!stream.resource<external>{%c11264}, !stream.resource<external>{%c7208960}) -> !stream.resource<external>{%c10240} | |
%3 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %2 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
// -----// IR Dump After OptimizeIntArithmeticPass (iree-util-optimize-int-arithmetic) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%2 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%0[%c0 to %c11264 for %c11264], %1[%c0 to %c7208960 for %c7208960]) : (!stream.resource<external>{%c11264}, !stream.resource<external>{%c7208960}) -> !stream.resource<external>{%c10240} | |
%3 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %2 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
// -----// IR Dump After SimplifyGlobalAccessesPass (iree-util-simplify-global-accesses) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%2 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%0[%c0 to %c11264 for %c11264], %1[%c0 to %c7208960 for %c7208960]) : (!stream.resource<external>{%c11264}, !stream.resource<external>{%c7208960}) -> !stream.resource<external>{%c10240} | |
%3 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %2 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
// -----// IR Dump After ApplyPatternsPass (iree-util-apply-patterns) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%2 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%0[%c0 to %c11264 for %c11264], %1[%c0 to %c7208960 for %c7208960]) : (!stream.resource<external>{%c11264}, !stream.resource<external>{%c7208960}) -> !stream.resource<external>{%c10240} | |
%3 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %2 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
// -----// IR Dump After FoldGlobalsPass (iree-util-fold-globals) //----- // | |
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_hip | |
stream.executable private @matmul_dispatch_0 { | |
stream.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
%3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%5 = tensor.empty() : tensor<2x1280xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%7 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%3, %4 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%6 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%8 = arith.extf %in : f16 to f32 | |
%9 = arith.extf %in_0 : f16 to f32 | |
%10 = arith.mulf %8, %9 : f32 | |
%11 = arith.addf %out, %10 : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<2x1280xf32> | |
iree_tensor_ext.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
} | |
} | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%2 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%0[%c0 to %c11264 for %c11264], %1[%c0 to %c7208960 for %c7208960]) : (!stream.resource<external>{%c11264}, !stream.resource<external>{%c7208960}) -> !stream.resource<external>{%c10240} | |
%3 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %2 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After FuseGlobalsPass (iree-util-fuse-globals) //----- // | |
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_hip | |
stream.executable private @matmul_dispatch_0 { | |
stream.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
%3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%5 = tensor.empty() : tensor<2x1280xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%7 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%3, %4 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%6 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%8 = arith.extf %in : f16 to f32 | |
%9 = arith.extf %in_0 : f16 to f32 | |
%10 = arith.mulf %8, %9 : f32 | |
%11 = arith.addf %out, %10 : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<2x1280xf32> | |
iree_tensor_ext.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
} | |
} | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%2 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%0[%c0 to %c11264 for %c11264], %1[%c0 to %c7208960 for %c7208960]) : (!stream.resource<external>{%c11264}, !stream.resource<external>{%c7208960}) -> !stream.resource<external>{%c10240} | |
%3 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %2 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After IPOPass (iree-util-ipo) //----- // | |
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_hip | |
stream.executable private @matmul_dispatch_0 { | |
stream.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
%3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%5 = tensor.empty() : tensor<2x1280xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%7 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%3, %4 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%6 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%8 = arith.extf %in : f16 to f32 | |
%9 = arith.extf %in_0 : f16 to f32 | |
%10 = arith.mulf %8, %9 : f32 | |
%11 = arith.addf %out, %10 : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<2x1280xf32> | |
iree_tensor_ext.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
} | |
} | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%2 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%0[%c0 to %c11264 for %c11264], %1[%c0 to %c7208960 for %c7208960]) : (!stream.resource<external>{%c11264}, !stream.resource<external>{%c7208960}) -> !stream.resource<external>{%c10240} | |
%3 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %2 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After VerifyAsyncAccessRangesPass (iree-stream-verify-async-access-ranges) //----- // | |
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_hip | |
stream.executable private @matmul_dispatch_0 { | |
stream.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
%3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%5 = tensor.empty() : tensor<2x1280xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%7 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%3, %4 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%6 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%8 = arith.extf %in : f16 to f32 | |
%9 = arith.extf %in_0 : f16 to f32 | |
%10 = arith.mulf %8, %9 : f32 | |
%11 = arith.addf %out, %10 : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<2x1280xf32> | |
iree_tensor_ext.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
} | |
} | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%2 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%0[%c0 to %c11264 for %c11264], %1[%c0 to %c7208960 for %c7208960]) : (!stream.resource<external>{%c11264}, !stream.resource<external>{%c7208960}) -> !stream.resource<external>{%c10240} | |
%3 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %2 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
} | |
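// Note: VerifyAsyncAccessRanges is verification-only; it checks that ranges such as %0[%c0 to %c11264 for %c11264] stay within the declared resource sizes and leaves the IR unchanged. | |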
// -----// IR Dump After ScheduleExecutionPass (iree-stream-schedule-execution) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}) -> !stream.resource<external>{%c10240} { | |
%4 = stream.async.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg2[%c0 to %c11264 for %c11264], %arg3[%c0 to %c7208960 for %c7208960]) : (!stream.resource<external>{%c11264}, !stream.resource<external>{%c7208960}) -> !stream.resource<external>{%c10240} | |
stream.yield %4 : !stream.resource<external>{%c10240} | |
} => !stream.timepoint | |
%2 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%c10240} | |
%3 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %2 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
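// Note: ScheduleExecution is the first structural change since RefineUsage: the dispatch is wrapped in a stream.async.execute region that yields a !stream.timepoint, and the host awaits %result_timepoint before exporting the result. | |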
// -----// IR Dump After ScheduleConcurrencyPass (iree-stream-schedule-concurrency) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}) -> !stream.resource<external>{%c10240} { | |
%4 = stream.async.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg2[%c0 to %c11264 for %c11264], %arg3[%c0 to %c7208960 for %c7208960]) : (!stream.resource<external>{%c11264}, !stream.resource<external>{%c7208960}) -> !stream.resource<external>{%c10240} | |
stream.yield %4 : !stream.resource<external>{%c10240} | |
} => !stream.timepoint | |
%2 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%c10240} | |
%3 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %2 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
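// ScheduleConcurrency looks for dispatches with no def-use edge between them
// and, when it finds any, wraps them in a stream.async.concurrent region so
// they can overlap. This program has a single dispatch, so the IR is left
// unchanged. A toy sketch of the grouping idea (assumed, simplified; not
// IREE's actual algorithm):

# Partition dispatches into waves; every member of a wave has all of its
# dependencies satisfied by earlier waves and may run concurrently.
def group_waves(dispatches, deps):
    waves, placed = [], set()
    while len(placed) < len(dispatches):
        wave = [d for d in dispatches if d not in placed and deps[d] <= placed]
        waves.append(wave)
        placed.update(wave)
    return waves

print(group_waves(["dispatch_0"], {"dispatch_0": set()}))  # [['dispatch_0']]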
// -----// IR Dump After SyncInitializersPass (iree-stream-sync-initializers) //----- // | |
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_hip | |
stream.executable private @matmul_dispatch_0 { | |
stream.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
%3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%5 = tensor.empty() : tensor<2x1280xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%7 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%3, %4 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%6 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%8 = arith.extf %in : f16 to f32 | |
%9 = arith.extf %in_0 : f16 to f32 | |
%10 = arith.mulf %8, %9 : f32 | |
%11 = arith.addf %out, %10 : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<2x1280xf32> | |
iree_tensor_ext.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
} | |
} | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}) -> !stream.resource<external>{%c10240} { | |
%4 = stream.async.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg2[%c0 to %c11264 for %c11264], %arg3[%c0 to %c7208960 for %c7208960]) : (!stream.resource<external>{%c11264}, !stream.resource<external>{%c7208960}) -> !stream.resource<external>{%c10240} | |
stream.yield %4 : !stream.resource<external>{%c10240} | |
} => !stream.timepoint | |
%2 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%c10240} | |
%3 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %2 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
} | |
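// The dispatch body is a mixed-precision matmul: both f16 operands are widened
// with arith.extf and the products are accumulated into an f32 output seeded
// by linalg.fill. A NumPy sketch of the same semantics (illustrative only):

import numpy as np

a = np.random.rand(2, 2816).astype(np.float16)     # input0
b = np.random.rand(2816, 1280).astype(np.float16)  # input1
# extf -> f32, then mulf/addf in f32, starting from the 0.0 fill value.
c = a.astype(np.float32) @ b.astype(np.float32)
assert c.shape == (2, 1280) and c.dtype == np.float32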
// -----// IR Dump After PropagateTimepointsPass (iree-stream-propagate-timepoints) //----- // | |
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_hip | |
stream.executable private @matmul_dispatch_0 { | |
stream.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
%3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%5 = tensor.empty() : tensor<2x1280xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%7 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%3, %4 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%6 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%8 = arith.extf %in : f16 to f32 | |
%9 = arith.extf %in_0 : f16 to f32 | |
%10 = arith.mulf %8, %9 : f32 | |
%11 = arith.addf %out, %10 : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<2x1280xf32> | |
iree_tensor_ext.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
} | |
} | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%2 = stream.timepoint.immediate => !stream.timepoint | |
%3 = stream.timepoint.immediate => !stream.timepoint | |
%4 = stream.timepoint.join max(%2, %3) => !stream.timepoint | |
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) await(%4) => with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}) -> !stream.resource<external>{%c10240} { | |
%7 = stream.async.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg2[%c0 to %c11264 for %c11264], %arg3[%c0 to %c7208960 for %c7208960]) : (!stream.resource<external>{%c11264}, !stream.resource<external>{%c7208960}) -> !stream.resource<external>{%c10240} | |
stream.yield %7 : !stream.resource<external>{%c10240} | |
} => !stream.timepoint | |
%5 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%c10240} | |
%6 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %5 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %6 : !hal.buffer_view | |
} | |
} | |
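// PropagateTimepoints makes the timeline explicit: the two imports carry no
// pending work, so the pass materializes stream.timepoint.immediate values,
// joins them, and feeds the join to await(...) on the execute. Modeling a
// timepoint as an already-resolved future shows why the canonicalizer below
// can delete all three ops. A minimal sketch (assumed semantics):

import concurrent.futures as cf

def immediate():
    f = cf.Future()           # stream.timepoint.immediate: already resolved
    f.set_result(None)
    return f

def join(*tps):               # stream.timepoint.join: wait on all inputs
    cf.wait(tps)
    return immediate()

join(immediate(), immediate()).result()  # resolves instantly; the await is a no-op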
// -----// IR Dump After MaterializeBuiltinsPass (iree-stream-materialize-builtins) //----- // | |
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_hip | |
stream.executable private @matmul_dispatch_0 { | |
stream.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
%3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%5 = tensor.empty() : tensor<2x1280xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%7 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%3, %4 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%6 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%8 = arith.extf %in : f16 to f32 | |
%9 = arith.extf %in_0 : f16 to f32 | |
%10 = arith.mulf %8, %9 : f32 | |
%11 = arith.addf %out, %10 : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<2x1280xf32> | |
iree_tensor_ext.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
} | |
} | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%2 = stream.timepoint.immediate => !stream.timepoint | |
%3 = stream.timepoint.immediate => !stream.timepoint | |
%4 = stream.timepoint.join max(%2, %3) => !stream.timepoint | |
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) await(%4) => with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}) -> !stream.resource<external>{%c10240} { | |
%7 = stream.async.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg2[%c0 to %c11264 for %c11264], %arg3[%c0 to %c7208960 for %c7208960]) : (!stream.resource<external>{%c11264}, !stream.resource<external>{%c7208960}) -> !stream.resource<external>{%c10240} | |
stream.yield %7 : !stream.resource<external>{%c10240} | |
} => !stream.timepoint | |
%5 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%c10240} | |
%6 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %5 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %6 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}) -> !stream.resource<external>{%c10240} { | |
%4 = stream.async.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg2[%c0 to %c11264 for %c11264], %arg3[%c0 to %c7208960 for %c7208960]) : (!stream.resource<external>{%c11264}, !stream.resource<external>{%c7208960}) -> !stream.resource<external>{%c10240} | |
stream.yield %4 : !stream.resource<external>{%c10240} | |
} => !stream.timepoint | |
%2 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%c10240} | |
%3 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %2 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
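// Canonicalization folded the immediate timepoints and the redundant join back
// out, restoring the plain execute. The CSE, int-arithmetic, global-access, and
// pattern-application dumps that follow are identical to this one: with a
// single dispatch and all-constant shapes there is nothing left to simplify.
// The fold itself, sketched (assumed, simplified):

# An await(...) clause over timepoints that are all already resolved is a no-op
# and can be dropped from stream.async.execute.
def fold_await_clause(timepoints):
    pending = [tp for tp in timepoints if tp != "immediate"]
    return pending or None  # None: drop the await(...) clause entirely

assert fold_await_clause(["immediate", "immediate"]) is None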
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}) -> !stream.resource<external>{%c10240} { | |
%4 = stream.async.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg2[%c0 to %c11264 for %c11264], %arg3[%c0 to %c7208960 for %c7208960]) : (!stream.resource<external>{%c11264}, !stream.resource<external>{%c7208960}) -> !stream.resource<external>{%c10240} | |
stream.yield %4 : !stream.resource<external>{%c10240} | |
} => !stream.timepoint | |
%2 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%c10240} | |
%3 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %2 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
// -----// IR Dump After OptimizeIntArithmeticPass (iree-util-optimize-int-arithmetic) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}) -> !stream.resource<external>{%c10240} { | |
%4 = stream.async.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg2[%c0 to %c11264 for %c11264], %arg3[%c0 to %c7208960 for %c7208960]) : (!stream.resource<external>{%c11264}, !stream.resource<external>{%c7208960}) -> !stream.resource<external>{%c10240} | |
stream.yield %4 : !stream.resource<external>{%c10240} | |
} => !stream.timepoint | |
%2 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%c10240} | |
%3 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %2 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
// -----// IR Dump After SimplifyGlobalAccessesPass (iree-util-simplify-global-accesses) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}) -> !stream.resource<external>{%c10240} { | |
%4 = stream.async.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg2[%c0 to %c11264 for %c11264], %arg3[%c0 to %c7208960 for %c7208960]) : (!stream.resource<external>{%c11264}, !stream.resource<external>{%c7208960}) -> !stream.resource<external>{%c10240} | |
stream.yield %4 : !stream.resource<external>{%c10240} | |
} => !stream.timepoint | |
%2 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%c10240} | |
%3 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %2 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
// -----// IR Dump After ApplyPatternsPass (iree-util-apply-patterns) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}) -> !stream.resource<external>{%c10240} { | |
%4 = stream.async.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg2[%c0 to %c11264 for %c11264], %arg3[%c0 to %c7208960 for %c7208960]) : (!stream.resource<external>{%c11264}, !stream.resource<external>{%c7208960}) -> !stream.resource<external>{%c10240} | |
stream.yield %4 : !stream.resource<external>{%c10240} | |
} => !stream.timepoint | |
%2 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%c10240} | |
%3 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %2 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
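// The FoldGlobals, FuseGlobals, and IPO dumps below are likewise no-ops here:
// @__device_0 is the module's only global and @matmul its only function, so
// there is nothing to fold, deduplicate, or propagate across calls. A toy
// version of the global-fusing idea (assumed, simplified):

# Globals with identical immutable initializers can share one definition.
def fuse_globals(globals_):
    canonical = {}
    return {name: canonical.setdefault(init, name)
            for name, init in globals_.items()}

print(fuse_globals({"__device_0": "#device_target_hip"}))  # maps to itself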
// -----// IR Dump After FoldGlobalsPass (iree-util-fold-globals) //----- // | |
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_hip | |
stream.executable private @matmul_dispatch_0 { | |
stream.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
%3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%5 = tensor.empty() : tensor<2x1280xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%7 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%3, %4 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%6 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%8 = arith.extf %in : f16 to f32 | |
%9 = arith.extf %in_0 : f16 to f32 | |
%10 = arith.mulf %8, %9 : f32 | |
%11 = arith.addf %out, %10 : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<2x1280xf32> | |
iree_tensor_ext.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
} | |
} | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}) -> !stream.resource<external>{%c10240} { | |
%4 = stream.async.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg2[%c0 to %c11264 for %c11264], %arg3[%c0 to %c7208960 for %c7208960]) : (!stream.resource<external>{%c11264}, !stream.resource<external>{%c7208960}) -> !stream.resource<external>{%c10240} | |
stream.yield %4 : !stream.resource<external>{%c10240} | |
} => !stream.timepoint | |
%2 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%c10240} | |
%3 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %2 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After FuseGlobalsPass (iree-util-fuse-globals) //----- // | |
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_hip | |
stream.executable private @matmul_dispatch_0 { | |
stream.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
%3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%5 = tensor.empty() : tensor<2x1280xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%7 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%3, %4 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%6 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%8 = arith.extf %in : f16 to f32 | |
%9 = arith.extf %in_0 : f16 to f32 | |
%10 = arith.mulf %8, %9 : f32 | |
%11 = arith.addf %out, %10 : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<2x1280xf32> | |
iree_tensor_ext.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
} | |
} | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}) -> !stream.resource<external>{%c10240} { | |
%4 = stream.async.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg2[%c0 to %c11264 for %c11264], %arg3[%c0 to %c7208960 for %c7208960]) : (!stream.resource<external>{%c11264}, !stream.resource<external>{%c7208960}) -> !stream.resource<external>{%c10240} | |
stream.yield %4 : !stream.resource<external>{%c10240} | |
} => !stream.timepoint | |
%2 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%c10240} | |
%3 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %2 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After IPOPass (iree-util-ipo) //----- // | |
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_hip | |
stream.executable private @matmul_dispatch_0 { | |
stream.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
%3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%5 = tensor.empty() : tensor<2x1280xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%7 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%3, %4 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%6 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%8 = arith.extf %in : f16 to f32 | |
%9 = arith.extf %in_0 : f16 to f32 | |
%10 = arith.mulf %8, %9 : f32 | |
%11 = arith.addf %out, %10 : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<2x1280xf32> | |
iree_tensor_ext.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
} | |
} | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}) -> !stream.resource<external>{%c10240} { | |
%4 = stream.async.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg2[%c0 to %c11264 for %c11264], %arg3[%c0 to %c7208960 for %c7208960]) : (!stream.resource<external>{%c11264}, !stream.resource<external>{%c7208960}) -> !stream.resource<external>{%c10240} | |
stream.yield %4 : !stream.resource<external>{%c10240} | |
} => !stream.timepoint | |
%2 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%c10240} | |
%3 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %2 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After VerifyLoweringToAsyncPass (iree-stream-verify-lowering-to-async) //----- // | |
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_hip | |
stream.executable private @matmul_dispatch_0 { | |
stream.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
%3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%5 = tensor.empty() : tensor<2x1280xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%7 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%3, %4 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%6 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%8 = arith.extf %in : f16 to f32 | |
%9 = arith.extf %in_0 : f16 to f32 | |
%10 = arith.mulf %8, %9 : f32 | |
%11 = arith.addf %out, %10 : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<2x1280xf32> | |
iree_tensor_ext.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
} | |
} | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}) -> !stream.resource<external>{%c10240} { | |
%4 = stream.async.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg2[%c0 to %c11264 for %c11264], %arg3[%c0 to %c7208960 for %c7208960]) : (!stream.resource<external>{%c11264}, !stream.resource<external>{%c7208960}) -> !stream.resource<external>{%c10240} | |
stream.yield %4 : !stream.resource<external>{%c10240} | |
} => !stream.timepoint | |
%2 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%c10240} | |
%3 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %2 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After ScheduleAllocationPass (iree-stream-schedule-allocation) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d0, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_hip
  stream.executable private @matmul_dispatch_0 {
    stream.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 workgroups() -> (index, index, index) {
      %x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice
      stream.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
        %cst = arith.constant 0.000000e+00 : f32
        %c0 = arith.constant 0 : index
        %0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>>
        %1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>>
        %2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>
        %3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16>
        %4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16>
        %5 = tensor.empty() : tensor<2x1280xf32>
        %6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
        %7 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%3, %4 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%6 : tensor<2x1280xf32>) {
        ^bb0(%in: f16, %in_0: f16, %out: f32):
          %8 = arith.extf %in : f16 to f32
          %9 = arith.extf %in_0 : f16 to f32
          %10 = arith.mulf %8, %9 : f32
          %11 = arith.addf %out, %10 : f32
          linalg.yield %11 : f32
        } -> tensor<2x1280xf32>
        iree_tensor_ext.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>
        return
      }
    }
  }
  util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
    %c0 = arith.constant 0 : index
    %c10240 = arith.constant 10240 : index
    %c7208960 = arith.constant 7208960 : index
    %c11264 = arith.constant 11264 : index
    %c1280 = arith.constant 1280 : index
    %c2816 = arith.constant 2816 : index
    %c2 = arith.constant 2 : index
    %element_type_f16 = hal.element_type<f16> : i32
    %dense_row_major = hal.encoding_type<dense_row_major> : i32
    hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major)
    %0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264}
    hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major)
    %1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960}
    %c0_0 = arith.constant 0 : index
    %result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c10240} => !stream.timepoint
    %2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}, %result as %arg4: !stream.resource<external>{%c10240}) {
      stream.cmd.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 {
        ro %arg2[%c0 for %c11264] : !stream.resource<external>{%c11264},
        ro %arg3[%c0 for %c7208960] : !stream.resource<external>{%c7208960},
        wo %arg4[%c0_0 for %c10240] : !stream.resource<external>{%c10240}
      }
    } => !stream.timepoint
    %3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c10240}
    %4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view
    util.return %4 : !hal.buffer_view
  }
}
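// Note: after allocation scheduling the program is explicit about memory and
// synchronization: the result buffer comes from stream.resource.alloca (which
// also yields a timepoint), the dispatch runs inside stream.cmd.execute with
// read-only (ro) and write-only (wo) byte-range bindings, and
// stream.timepoint.await orders the export after execution completes. The
// dispatch body itself is a mixed-precision matmul: f16 operands are extended
// to f32 and accumulated in f32. An illustrative NumPy sketch of the same
// computation (annotation only, not part of the compiler output):
//   import numpy as np
//   A = np.random.rand(2, 2816).astype(np.float16)
//   B = np.random.rand(2816, 1280).astype(np.float16)
//   C = A.astype(np.float32) @ B.astype(np.float32)  # tensor<2x1280xf32>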
// -----// IR Dump After PackConstantsPass (iree-stream-pack-constants) //----- //
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
  %c0 = arith.constant 0 : index
  %c10240 = arith.constant 10240 : index
  %c7208960 = arith.constant 7208960 : index
  %c11264 = arith.constant 11264 : index
  %c1280 = arith.constant 1280 : index
  %c2816 = arith.constant 2816 : index
  %c2 = arith.constant 2 : index
  %element_type_f16 = hal.element_type<f16> : i32
  %dense_row_major = hal.encoding_type<dense_row_major> : i32
  hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major)
  %0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264}
  hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major)
  %1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960}
  %c0_0 = arith.constant 0 : index
  %result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c10240} => !stream.timepoint
  %2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}, %result as %arg4: !stream.resource<external>{%c10240}) {
    stream.cmd.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 {
      ro %arg2[%c0 for %c11264] : !stream.resource<external>{%c11264},
      ro %arg3[%c0 for %c7208960] : !stream.resource<external>{%c7208960},
      wo %arg4[%c0_0 for %c10240] : !stream.resource<external>{%c10240}
    }
  } => !stream.timepoint
  %3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c10240}
  %4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view
  util.return %4 : !hal.buffer_view
}
// -----// IR Dump After LayoutSlicesPass (iree-stream-layout-slices) //----- //
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
  %c0 = arith.constant 0 : index
  %c10240 = arith.constant 10240 : index
  %c7208960 = arith.constant 7208960 : index
  %c11264 = arith.constant 11264 : index
  %c1280 = arith.constant 1280 : index
  %c2816 = arith.constant 2816 : index
  %c2 = arith.constant 2 : index
  %element_type_f16 = hal.element_type<f16> : i32
  %dense_row_major = hal.encoding_type<dense_row_major> : i32
  hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major)
  %0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264}
  hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major)
  %1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960}
  %c0_0 = arith.constant 0 : index
  %result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c10240} => !stream.timepoint
  %2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}, %result as %arg4: !stream.resource<external>{%c10240}) {
    stream.cmd.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 {
      ro %arg2[%c0 for %c11264] : !stream.resource<external>{%c11264},
      ro %arg3[%c0 for %c7208960] : !stream.resource<external>{%c7208960},
      wo %arg4[%c0_0 for %c10240] : !stream.resource<external>{%c10240}
    }
  } => !stream.timepoint
  %3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c10240}
  %4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view
  util.return %4 : !hal.buffer_view
}
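// Note: the redundant zero constant %c0_0 (identical to %c0) introduced by
// allocation scheduling is still present above; the canonicalizer run below
// folds it away, so the wo binding indexes with %c0 from here on.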
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
  %c0 = arith.constant 0 : index
  %c10240 = arith.constant 10240 : index
  %c7208960 = arith.constant 7208960 : index
  %c11264 = arith.constant 11264 : index
  %c1280 = arith.constant 1280 : index
  %c2816 = arith.constant 2816 : index
  %c2 = arith.constant 2 : index
  %element_type_f16 = hal.element_type<f16> : i32
  %dense_row_major = hal.encoding_type<dense_row_major> : i32
  hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major)
  %0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264}
  hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major)
  %1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960}
  %result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c10240} => !stream.timepoint
  %2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}, %result as %arg4: !stream.resource<external>{%c10240}) {
    stream.cmd.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 {
      ro %arg2[%c0 for %c11264] : !stream.resource<external>{%c11264},
      ro %arg3[%c0 for %c7208960] : !stream.resource<external>{%c7208960},
      wo %arg4[%c0 for %c10240] : !stream.resource<external>{%c10240}
    }
  } => !stream.timepoint
  %3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c10240}
  %4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view
  util.return %4 : !hal.buffer_view
}
// -----// IR Dump After PropagateSubrangesPass (iree-util-propagate-subranges) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d0, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_hip
  stream.executable private @matmul_dispatch_0 {
    stream.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 workgroups() -> (index, index, index) {
      %x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice
      stream.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
        %cst = arith.constant 0.000000e+00 : f32
        %c0 = arith.constant 0 : index
        %0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>>
        %1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>>
        %2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>
        %3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16>
        %4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16>
        %5 = tensor.empty() : tensor<2x1280xf32>
        %6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
        %7 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%3, %4 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%6 : tensor<2x1280xf32>) {
        ^bb0(%in: f16, %in_0: f16, %out: f32):
          %8 = arith.extf %in : f16 to f32
          %9 = arith.extf %in_0 : f16 to f32
          %10 = arith.mulf %8, %9 : f32
          %11 = arith.addf %out, %10 : f32
          linalg.yield %11 : f32
        } -> tensor<2x1280xf32>
        iree_tensor_ext.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>
        return
      }
    }
  }
  util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
    %c0 = arith.constant 0 : index
    %c10240 = arith.constant 10240 : index
    %c7208960 = arith.constant 7208960 : index
    %c11264 = arith.constant 11264 : index
    %c1280 = arith.constant 1280 : index
    %c2816 = arith.constant 2816 : index
    %c2 = arith.constant 2 : index
    %element_type_f16 = hal.element_type<f16> : i32
    %dense_row_major = hal.encoding_type<dense_row_major> : i32
    hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major)
    %0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264}
    hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major)
    %1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960}
    %result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c10240} => !stream.timepoint
    %2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}, %result as %arg4: !stream.resource<external>{%c10240}) {
      stream.cmd.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 {
        ro %arg2[%c0 for %c11264] : !stream.resource<external>{%c11264},
        ro %arg3[%c0 for %c7208960] : !stream.resource<external>{%c7208960},
        wo %arg4[%c0 for %c10240] : !stream.resource<external>{%c10240}
      }
    } => !stream.timepoint
    %3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c10240}
    %4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view
    util.return %4 : !hal.buffer_view
  }
}
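// Note: none of the cleanup passes that follow (canonicalize, cse,
// iree-util-optimize-int-arithmetic, iree-util-simplify-global-accesses,
// iree-util-apply-patterns, iree-util-fold-globals, iree-util-fuse-globals,
// iree-util-ipo, iree-stream-verify-lowering-to-cmd) finds anything left to
// change; the dumps below re-print the same module or function verbatim.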
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
  %c0 = arith.constant 0 : index
  %c10240 = arith.constant 10240 : index
  %c7208960 = arith.constant 7208960 : index
  %c11264 = arith.constant 11264 : index
  %c1280 = arith.constant 1280 : index
  %c2816 = arith.constant 2816 : index
  %c2 = arith.constant 2 : index
  %element_type_f16 = hal.element_type<f16> : i32
  %dense_row_major = hal.encoding_type<dense_row_major> : i32
  hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major)
  %0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264}
  hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major)
  %1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960}
  %result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c10240} => !stream.timepoint
  %2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}, %result as %arg4: !stream.resource<external>{%c10240}) {
    stream.cmd.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 {
      ro %arg2[%c0 for %c11264] : !stream.resource<external>{%c11264},
      ro %arg3[%c0 for %c7208960] : !stream.resource<external>{%c7208960},
      wo %arg4[%c0 for %c10240] : !stream.resource<external>{%c10240}
    }
  } => !stream.timepoint
  %3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c10240}
  %4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view
  util.return %4 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
  %c0 = arith.constant 0 : index
  %c10240 = arith.constant 10240 : index
  %c7208960 = arith.constant 7208960 : index
  %c11264 = arith.constant 11264 : index
  %c1280 = arith.constant 1280 : index
  %c2816 = arith.constant 2816 : index
  %c2 = arith.constant 2 : index
  %element_type_f16 = hal.element_type<f16> : i32
  %dense_row_major = hal.encoding_type<dense_row_major> : i32
  hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major)
  %0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264}
  hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major)
  %1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960}
  %result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c10240} => !stream.timepoint
  %2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}, %result as %arg4: !stream.resource<external>{%c10240}) {
    stream.cmd.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 {
      ro %arg2[%c0 for %c11264] : !stream.resource<external>{%c11264},
      ro %arg3[%c0 for %c7208960] : !stream.resource<external>{%c7208960},
      wo %arg4[%c0 for %c10240] : !stream.resource<external>{%c10240}
    }
  } => !stream.timepoint
  %3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c10240}
  %4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view
  util.return %4 : !hal.buffer_view
}
// -----// IR Dump After OptimizeIntArithmeticPass (iree-util-optimize-int-arithmetic) //----- //
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
  %c0 = arith.constant 0 : index
  %c10240 = arith.constant 10240 : index
  %c7208960 = arith.constant 7208960 : index
  %c11264 = arith.constant 11264 : index
  %c1280 = arith.constant 1280 : index
  %c2816 = arith.constant 2816 : index
  %c2 = arith.constant 2 : index
  %element_type_f16 = hal.element_type<f16> : i32
  %dense_row_major = hal.encoding_type<dense_row_major> : i32
  hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major)
  %0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264}
  hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major)
  %1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960}
  %result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c10240} => !stream.timepoint
  %2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}, %result as %arg4: !stream.resource<external>{%c10240}) {
    stream.cmd.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 {
      ro %arg2[%c0 for %c11264] : !stream.resource<external>{%c11264},
      ro %arg3[%c0 for %c7208960] : !stream.resource<external>{%c7208960},
      wo %arg4[%c0 for %c10240] : !stream.resource<external>{%c10240}
    }
  } => !stream.timepoint
  %3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c10240}
  %4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view
  util.return %4 : !hal.buffer_view
}
// -----// IR Dump After SimplifyGlobalAccessesPass (iree-util-simplify-global-accesses) //----- //
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
  %c0 = arith.constant 0 : index
  %c10240 = arith.constant 10240 : index
  %c7208960 = arith.constant 7208960 : index
  %c11264 = arith.constant 11264 : index
  %c1280 = arith.constant 1280 : index
  %c2816 = arith.constant 2816 : index
  %c2 = arith.constant 2 : index
  %element_type_f16 = hal.element_type<f16> : i32
  %dense_row_major = hal.encoding_type<dense_row_major> : i32
  hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major)
  %0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264}
  hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major)
  %1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960}
  %result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c10240} => !stream.timepoint
  %2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}, %result as %arg4: !stream.resource<external>{%c10240}) {
    stream.cmd.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 {
      ro %arg2[%c0 for %c11264] : !stream.resource<external>{%c11264},
      ro %arg3[%c0 for %c7208960] : !stream.resource<external>{%c7208960},
      wo %arg4[%c0 for %c10240] : !stream.resource<external>{%c10240}
    }
  } => !stream.timepoint
  %3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c10240}
  %4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view
  util.return %4 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatternsPass (iree-util-apply-patterns) //----- //
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
  %c0 = arith.constant 0 : index
  %c10240 = arith.constant 10240 : index
  %c7208960 = arith.constant 7208960 : index
  %c11264 = arith.constant 11264 : index
  %c1280 = arith.constant 1280 : index
  %c2816 = arith.constant 2816 : index
  %c2 = arith.constant 2 : index
  %element_type_f16 = hal.element_type<f16> : i32
  %dense_row_major = hal.encoding_type<dense_row_major> : i32
  hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major)
  %0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264}
  hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major)
  %1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960}
  %result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c10240} => !stream.timepoint
  %2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}, %result as %arg4: !stream.resource<external>{%c10240}) {
    stream.cmd.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 {
      ro %arg2[%c0 for %c11264] : !stream.resource<external>{%c11264},
      ro %arg3[%c0 for %c7208960] : !stream.resource<external>{%c7208960},
      wo %arg4[%c0 for %c10240] : !stream.resource<external>{%c10240}
    }
  } => !stream.timepoint
  %3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c10240}
  %4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view
  util.return %4 : !hal.buffer_view
}
// -----// IR Dump After FoldGlobalsPass (iree-util-fold-globals) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d0, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_hip
  stream.executable private @matmul_dispatch_0 {
    stream.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 workgroups() -> (index, index, index) {
      %x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice
      stream.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
        %cst = arith.constant 0.000000e+00 : f32
        %c0 = arith.constant 0 : index
        %0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>>
        %1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>>
        %2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>
        %3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16>
        %4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16>
        %5 = tensor.empty() : tensor<2x1280xf32>
        %6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
        %7 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%3, %4 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%6 : tensor<2x1280xf32>) {
        ^bb0(%in: f16, %in_0: f16, %out: f32):
          %8 = arith.extf %in : f16 to f32
          %9 = arith.extf %in_0 : f16 to f32
          %10 = arith.mulf %8, %9 : f32
          %11 = arith.addf %out, %10 : f32
          linalg.yield %11 : f32
        } -> tensor<2x1280xf32>
        iree_tensor_ext.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>
        return
      }
    }
  }
  util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
    %c0 = arith.constant 0 : index
    %c10240 = arith.constant 10240 : index
    %c7208960 = arith.constant 7208960 : index
    %c11264 = arith.constant 11264 : index
    %c1280 = arith.constant 1280 : index
    %c2816 = arith.constant 2816 : index
    %c2 = arith.constant 2 : index
    %element_type_f16 = hal.element_type<f16> : i32
    %dense_row_major = hal.encoding_type<dense_row_major> : i32
    hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major)
    %0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264}
    hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major)
    %1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960}
    %result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c10240} => !stream.timepoint
    %2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}, %result as %arg4: !stream.resource<external>{%c10240}) {
      stream.cmd.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 {
        ro %arg2[%c0 for %c11264] : !stream.resource<external>{%c11264},
        ro %arg3[%c0 for %c7208960] : !stream.resource<external>{%c7208960},
        wo %arg4[%c0 for %c10240] : !stream.resource<external>{%c10240}
      }
    } => !stream.timepoint
    %3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c10240}
    %4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view
    util.return %4 : !hal.buffer_view
  }
}
// -----// IR Dump After FuseGlobalsPass (iree-util-fuse-globals) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d0, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_hip
  stream.executable private @matmul_dispatch_0 {
    stream.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 workgroups() -> (index, index, index) {
      %x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice
      stream.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
        %cst = arith.constant 0.000000e+00 : f32
        %c0 = arith.constant 0 : index
        %0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>>
        %1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>>
        %2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>
        %3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16>
        %4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16>
        %5 = tensor.empty() : tensor<2x1280xf32>
        %6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
        %7 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%3, %4 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%6 : tensor<2x1280xf32>) {
        ^bb0(%in: f16, %in_0: f16, %out: f32):
          %8 = arith.extf %in : f16 to f32
          %9 = arith.extf %in_0 : f16 to f32
          %10 = arith.mulf %8, %9 : f32
          %11 = arith.addf %out, %10 : f32
          linalg.yield %11 : f32
        } -> tensor<2x1280xf32>
        iree_tensor_ext.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>
        return
      }
    }
  }
  util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
    %c0 = arith.constant 0 : index
    %c10240 = arith.constant 10240 : index
    %c7208960 = arith.constant 7208960 : index
    %c11264 = arith.constant 11264 : index
    %c1280 = arith.constant 1280 : index
    %c2816 = arith.constant 2816 : index
    %c2 = arith.constant 2 : index
    %element_type_f16 = hal.element_type<f16> : i32
    %dense_row_major = hal.encoding_type<dense_row_major> : i32
    hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major)
    %0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264}
    hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major)
    %1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960}
    %result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c10240} => !stream.timepoint
    %2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}, %result as %arg4: !stream.resource<external>{%c10240}) {
      stream.cmd.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 {
        ro %arg2[%c0 for %c11264] : !stream.resource<external>{%c11264},
        ro %arg3[%c0 for %c7208960] : !stream.resource<external>{%c7208960},
        wo %arg4[%c0 for %c10240] : !stream.resource<external>{%c10240}
      }
    } => !stream.timepoint
    %3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c10240}
    %4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view
    util.return %4 : !hal.buffer_view
  }
}
// -----// IR Dump After IPOPass (iree-util-ipo) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d0, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_hip
  stream.executable private @matmul_dispatch_0 {
    stream.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 workgroups() -> (index, index, index) {
      %x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice
      stream.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
        %cst = arith.constant 0.000000e+00 : f32
        %c0 = arith.constant 0 : index
        %0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>>
        %1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>>
        %2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>
        %3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16>
        %4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16>
        %5 = tensor.empty() : tensor<2x1280xf32>
        %6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
        %7 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%3, %4 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%6 : tensor<2x1280xf32>) {
        ^bb0(%in: f16, %in_0: f16, %out: f32):
          %8 = arith.extf %in : f16 to f32
          %9 = arith.extf %in_0 : f16 to f32
          %10 = arith.mulf %8, %9 : f32
          %11 = arith.addf %out, %10 : f32
          linalg.yield %11 : f32
        } -> tensor<2x1280xf32>
        iree_tensor_ext.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>
        return
      }
    }
  }
  util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
    %c0 = arith.constant 0 : index
    %c10240 = arith.constant 10240 : index
    %c7208960 = arith.constant 7208960 : index
    %c11264 = arith.constant 11264 : index
    %c1280 = arith.constant 1280 : index
    %c2816 = arith.constant 2816 : index
    %c2 = arith.constant 2 : index
    %element_type_f16 = hal.element_type<f16> : i32
    %dense_row_major = hal.encoding_type<dense_row_major> : i32
    hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major)
    %0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264}
    hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major)
    %1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960}
    %result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c10240} => !stream.timepoint
    %2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}, %result as %arg4: !stream.resource<external>{%c10240}) {
      stream.cmd.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 {
        ro %arg2[%c0 for %c11264] : !stream.resource<external>{%c11264},
        ro %arg3[%c0 for %c7208960] : !stream.resource<external>{%c7208960},
        wo %arg4[%c0 for %c10240] : !stream.resource<external>{%c10240}
      }
    } => !stream.timepoint
    %3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c10240}
    %4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view
    util.return %4 : !hal.buffer_view
  }
}
// -----// IR Dump After VerifyLoweringToCmdPass (iree-stream-verify-lowering-to-cmd) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d0, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_hip
  stream.executable private @matmul_dispatch_0 {
    stream.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 workgroups() -> (index, index, index) {
      %x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice
      stream.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
        %cst = arith.constant 0.000000e+00 : f32
        %c0 = arith.constant 0 : index
        %0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>>
        %1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>>
        %2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>
        %3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16>
        %4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16>
        %5 = tensor.empty() : tensor<2x1280xf32>
        %6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
        %7 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%3, %4 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%6 : tensor<2x1280xf32>) {
        ^bb0(%in: f16, %in_0: f16, %out: f32):
          %8 = arith.extf %in : f16 to f32
          %9 = arith.extf %in_0 : f16 to f32
          %10 = arith.mulf %8, %9 : f32
          %11 = arith.addf %out, %10 : f32
          linalg.yield %11 : f32
        } -> tensor<2x1280xf32>
        iree_tensor_ext.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>
        return
      }
    }
  }
  util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
    %c0 = arith.constant 0 : index
    %c10240 = arith.constant 10240 : index
    %c7208960 = arith.constant 7208960 : index
    %c11264 = arith.constant 11264 : index
    %c1280 = arith.constant 1280 : index
    %c2816 = arith.constant 2816 : index
    %c2 = arith.constant 2 : index
    %element_type_f16 = hal.element_type<f16> : i32
    %dense_row_major = hal.encoding_type<dense_row_major> : i32
    hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major)
    %0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264}
    hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major)
    %1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960}
    %result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c10240} => !stream.timepoint
    %2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}, %result as %arg4: !stream.resource<external>{%c10240}) {
      stream.cmd.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 {
        ro %arg2[%c0 for %c11264] : !stream.resource<external>{%c11264},
        ro %arg3[%c0 for %c7208960] : !stream.resource<external>{%c7208960},
        wo %arg4[%c0 for %c10240] : !stream.resource<external>{%c10240}
      }
    } => !stream.timepoint
    %3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c10240}
    %4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view
    util.return %4 : !hal.buffer_view
  }
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c10240} => !stream.timepoint | |
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}, %result as %arg4: !stream.resource<external>{%c10240}) { | |
stream.cmd.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 { | |
ro %arg2[%c0 for %c11264] : !stream.resource<external>{%c11264}, | |
ro %arg3[%c0 for %c7208960] : !stream.resource<external>{%c7208960}, | |
wo %arg4[%c0 for %c10240] : !stream.resource<external>{%c10240} | |
} | |
} => !stream.timepoint | |
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c10240} | |
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c10240} => !stream.timepoint | |
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}, %result as %arg4: !stream.resource<external>{%c10240}) { | |
stream.cmd.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 { | |
ro %arg2[%c0 for %c11264] : !stream.resource<external>{%c11264}, | |
ro %arg3[%c0 for %c7208960] : !stream.resource<external>{%c7208960}, | |
wo %arg4[%c0 for %c10240] : !stream.resource<external>{%c10240} | |
} | |
} => !stream.timepoint | |
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c10240} | |
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
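// note: cse (common subexpression elimination) merges structurally identical ops; the constants and stream ops here were already unique, so the function is unchanged from the previous dump. | |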
// -----// IR Dump After OptimizeIntArithmeticPass (iree-util-optimize-int-arithmetic) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c10240} => !stream.timepoint | |
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}, %result as %arg4: !stream.resource<external>{%c10240}) { | |
stream.cmd.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 { | |
ro %arg2[%c0 for %c11264] : !stream.resource<external>{%c11264}, | |
ro %arg3[%c0 for %c7208960] : !stream.resource<external>{%c7208960}, | |
wo %arg4[%c0 for %c10240] : !stream.resource<external>{%c10240} | |
} | |
} => !stream.timepoint | |
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c10240} | |
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
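// note: iree-util-optimize-int-arithmetic folds and narrows index/integer math using range analysis; every size and shape value above is already an arith.constant, so there is nothing to fold. | |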
// -----// IR Dump After SimplifyGlobalAccessesPass (iree-util-simplify-global-accesses) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c10240} => !stream.timepoint | |
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}, %result as %arg4: !stream.resource<external>{%c10240}) { | |
stream.cmd.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 { | |
ro %arg2[%c0 for %c11264] : !stream.resource<external>{%c11264}, | |
ro %arg3[%c0 for %c7208960] : !stream.resource<external>{%c7208960}, | |
wo %arg4[%c0 for %c10240] : !stream.resource<external>{%c10240} | |
} | |
} => !stream.timepoint | |
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c10240} | |
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
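// note: iree-util-simplify-global-accesses hoists and deduplicates util.global loads/stores; @matmul references @__device_0 only through #hal.device.affinity attributes rather than global load ops, so it is untouched. | |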
// -----// IR Dump After ApplyPatternsPass (iree-util-apply-patterns) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c10240} => !stream.timepoint | |
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}, %result as %arg4: !stream.resource<external>{%c10240}) { | |
stream.cmd.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 { | |
ro %arg2[%c0 for %c11264] : !stream.resource<external>{%c11264}, | |
ro %arg3[%c0 for %c7208960] : !stream.resource<external>{%c7208960}, | |
wo %arg4[%c0 for %c10240] : !stream.resource<external>{%c10240} | |
} | |
} => !stream.timepoint | |
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c10240} | |
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
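// note: iree-util-apply-patterns applies the util dialect's folding/canonicalization patterns; the IR is already in canonical form, so this dump is identical to the one before it. | |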
// -----// IR Dump After FoldGlobalsPass (iree-util-fold-globals) //----- // | |
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_hip | |
stream.executable private @matmul_dispatch_0 { | |
stream.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
%3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%5 = tensor.empty() : tensor<2x1280xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%7 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%3, %4 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%6 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%8 = arith.extf %in : f16 to f32 | |
%9 = arith.extf %in_0 : f16 to f32 | |
%10 = arith.mulf %8, %9 : f32 | |
%11 = arith.addf %out, %10 : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<2x1280xf32> | |
iree_tensor_ext.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
} | |
} | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c10240} => !stream.timepoint | |
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}, %result as %arg4: !stream.resource<external>{%c10240}) { | |
stream.cmd.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 { | |
ro %arg2[%c0 for %c11264] : !stream.resource<external>{%c11264}, | |
ro %arg3[%c0 for %c7208960] : !stream.resource<external>{%c7208960}, | |
wo %arg4[%c0 for %c10240] : !stream.resource<external>{%c10240} | |
} | |
} => !stream.timepoint | |
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c10240} | |
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
} | |
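// note: iree-util-fold-globals operates module-wide, so the dump widens to the full module (the #executable_target attribute, the stream.executable, and the @__device_0 global); with a single immutable device global there is nothing to fold away. | |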
// -----// IR Dump After FuseGlobalsPass (iree-util-fuse-globals) //----- // | |
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_hip | |
stream.executable private @matmul_dispatch_0 { | |
stream.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
%3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%5 = tensor.empty() : tensor<2x1280xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%7 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%3, %4 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%6 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%8 = arith.extf %in : f16 to f32 | |
%9 = arith.extf %in_0 : f16 to f32 | |
%10 = arith.mulf %8, %9 : f32 | |
%11 = arith.addf %out, %10 : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<2x1280xf32> | |
iree_tensor_ext.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
} | |
} | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c10240} => !stream.timepoint | |
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}, %result as %arg4: !stream.resource<external>{%c10240}) { | |
stream.cmd.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 { | |
ro %arg2[%c0 for %c11264] : !stream.resource<external>{%c11264}, | |
ro %arg3[%c0 for %c7208960] : !stream.resource<external>{%c7208960}, | |
wo %arg4[%c0 for %c10240] : !stream.resource<external>{%c10240} | |
} | |
} => !stream.timepoint | |
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c10240} | |
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
} | |
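// note: iree-util-fuse-globals merges globals that provably always hold the same value; @__device_0 is the only global, so there are no fusion candidates. | |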
// -----// IR Dump After IPOPass (iree-util-ipo) //----- // | |
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_hip | |
stream.executable private @matmul_dispatch_0 { | |
stream.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
%3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%5 = tensor.empty() : tensor<2x1280xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%7 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%3, %4 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%6 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%8 = arith.extf %in : f16 to f32 | |
%9 = arith.extf %in_0 : f16 to f32 | |
%10 = arith.mulf %8, %9 : f32 | |
%11 = arith.addf %out, %10 : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<2x1280xf32> | |
iree_tensor_ext.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
} | |
} | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c10240} => !stream.timepoint | |
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}, %result as %arg4: !stream.resource<external>{%c10240}) { | |
stream.cmd.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 { | |
ro %arg2[%c0 for %c11264] : !stream.resource<external>{%c11264}, | |
ro %arg3[%c0 for %c7208960] : !stream.resource<external>{%c7208960}, | |
wo %arg4[%c0 for %c10240] : !stream.resource<external>{%c10240} | |
} | |
} => !stream.timepoint | |
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c10240} | |
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
} | |
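// note: iree-util-ipo performs interprocedural optimization across util.func call edges (constant and argument propagation, dead result elimination); @matmul is the lone public entry point with no internal callees, so nothing changes. | |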
// -----// IR Dump After SCFToControlFlowPass (convert-scf-to-cf) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c10240} => !stream.timepoint | |
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}, %result as %arg4: !stream.resource<external>{%c10240}) { | |
stream.cmd.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 { | |
ro %arg2[%c0 for %c11264] : !stream.resource<external>{%c11264}, | |
ro %arg3[%c0 for %c7208960] : !stream.resource<external>{%c7208960}, | |
wo %arg4[%c0 for %c10240] : !stream.resource<external>{%c10240} | |
} | |
} => !stream.timepoint | |
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c10240} | |
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
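// note: convert-scf-to-cf lowers structured control flow (scf.for/scf.if/scf.while) to cf branches; the host function is straight-line code with no scf ops, hence no change. | |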
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c10240} => !stream.timepoint | |
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}, %result as %arg4: !stream.resource<external>{%c10240}) { | |
stream.cmd.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 { | |
ro %arg2[%c0 for %c11264] : !stream.resource<external>{%c11264}, | |
ro %arg3[%c0 for %c7208960] : !stream.resource<external>{%c7208960}, | |
wo %arg4[%c0 for %c10240] : !stream.resource<external>{%c10240} | |
} | |
} => !stream.timepoint | |
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c10240} | |
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c10240} => !stream.timepoint | |
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}, %result as %arg4: !stream.resource<external>{%c10240}) { | |
stream.cmd.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 { | |
ro %arg2[%c0 for %c11264] : !stream.resource<external>{%c11264}, | |
ro %arg3[%c0 for %c7208960] : !stream.resource<external>{%c7208960}, | |
wo %arg4[%c0 for %c10240] : !stream.resource<external>{%c10240} | |
} | |
} => !stream.timepoint | |
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c10240} | |
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
// -----// IR Dump After OptimizeIntArithmeticPass (iree-util-optimize-int-arithmetic) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c10240} => !stream.timepoint | |
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}, %result as %arg4: !stream.resource<external>{%c10240}) { | |
stream.cmd.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 { | |
ro %arg2[%c0 for %c11264] : !stream.resource<external>{%c11264}, | |
ro %arg3[%c0 for %c7208960] : !stream.resource<external>{%c7208960}, | |
wo %arg4[%c0 for %c10240] : !stream.resource<external>{%c10240} | |
} | |
} => !stream.timepoint | |
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c10240} | |
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
// -----// IR Dump After SimplifyGlobalAccessesPass (iree-util-simplify-global-accesses) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c10240} => !stream.timepoint | |
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}, %result as %arg4: !stream.resource<external>{%c10240}) { | |
stream.cmd.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 { | |
ro %arg2[%c0 for %c11264] : !stream.resource<external>{%c11264}, | |
ro %arg3[%c0 for %c7208960] : !stream.resource<external>{%c7208960}, | |
wo %arg4[%c0 for %c10240] : !stream.resource<external>{%c10240} | |
} | |
} => !stream.timepoint | |
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c10240} | |
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
// -----// IR Dump After ApplyPatternsPass (iree-util-apply-patterns) //----- // | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c10240} => !stream.timepoint | |
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}, %result as %arg4: !stream.resource<external>{%c10240}) { | |
stream.cmd.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 { | |
ro %arg2[%c0 for %c11264] : !stream.resource<external>{%c11264}, | |
ro %arg3[%c0 for %c7208960] : !stream.resource<external>{%c7208960}, | |
wo %arg4[%c0 for %c10240] : !stream.resource<external>{%c10240} | |
} | |
} => !stream.timepoint | |
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c10240} | |
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
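// note: canonicalize, cse, iree-util-optimize-int-arithmetic, iree-util-simplify-global-accesses, and iree-util-apply-patterns re-run above as a second cleanup round; each dump is identical to its first-round counterpart. | |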
// -----// IR Dump After FoldGlobalsPass (iree-util-fold-globals) //----- // | |
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device | |
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_hip | |
stream.executable private @matmul_dispatch_0 { | |
stream.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
%3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%5 = tensor.empty() : tensor<2x1280xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%7 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%3, %4 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%6 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%8 = arith.extf %in : f16 to f32 | |
%9 = arith.extf %in_0 : f16 to f32 | |
%10 = arith.mulf %8, %9 : f32 | |
%11 = arith.addf %out, %10 : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<2x1280xf32> | |
iree_tensor_ext.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
} | |
} | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c10240} => !stream.timepoint | |
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}, %result as %arg4: !stream.resource<external>{%c10240}) { | |
stream.cmd.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 { | |
ro %arg2[%c0 for %c11264] : !stream.resource<external>{%c11264}, | |
ro %arg3[%c0 for %c7208960] : !stream.resource<external>{%c7208960}, | |
wo %arg4[%c0 for %c10240] : !stream.resource<external>{%c10240} | |
} | |
} => !stream.timepoint | |
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c10240} | |
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
} | |
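// note: the module attributes now include iree.fixedpoint.iteration = 0, which appears to be bookkeeping from the fixed-point driver wrapping this global-optimization pipeline; the globals themselves are unchanged. | |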
// -----// IR Dump After FuseGlobalsPass (iree-util-fuse-globals) //----- // | |
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device | |
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_hip | |
stream.executable private @matmul_dispatch_0 { | |
stream.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
%3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%5 = tensor.empty() : tensor<2x1280xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%7 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%3, %4 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%6 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%8 = arith.extf %in : f16 to f32 | |
%9 = arith.extf %in_0 : f16 to f32 | |
%10 = arith.mulf %8, %9 : f32 | |
%11 = arith.addf %out, %10 : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<2x1280xf32> | |
iree_tensor_ext.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
} | |
} | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c10240} => !stream.timepoint | |
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}, %result as %arg4: !stream.resource<external>{%c10240}) { | |
stream.cmd.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 { | |
ro %arg2[%c0 for %c11264] : !stream.resource<external>{%c11264}, | |
ro %arg3[%c0 for %c7208960] : !stream.resource<external>{%c7208960}, | |
wo %arg4[%c0 for %c10240] : !stream.resource<external>{%c10240} | |
} | |
} => !stream.timepoint | |
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c10240} | |
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After IPOPass (iree-util-ipo) //----- // | |
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d0, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_hip
  stream.executable private @matmul_dispatch_0 {
    stream.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 workgroups() -> (index, index, index) {
      %x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice
      stream.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
        %cst = arith.constant 0.000000e+00 : f32
        %c0 = arith.constant 0 : index
        %0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>>
        %1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>>
        %2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>
        %3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16>
        %4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16>
        %5 = tensor.empty() : tensor<2x1280xf32>
        %6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
        %7 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%3, %4 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%6 : tensor<2x1280xf32>) {
        ^bb0(%in: f16, %in_0: f16, %out: f32):
          %8 = arith.extf %in : f16 to f32
          %9 = arith.extf %in_0 : f16 to f32
          %10 = arith.mulf %8, %9 : f32
          %11 = arith.addf %out, %10 : f32
          linalg.yield %11 : f32
        } -> tensor<2x1280xf32>
        iree_tensor_ext.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>
        return
      }
    }
  }
  util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
    %c0 = arith.constant 0 : index
    %c10240 = arith.constant 10240 : index
    %c7208960 = arith.constant 7208960 : index
    %c11264 = arith.constant 11264 : index
    %c1280 = arith.constant 1280 : index
    %c2816 = arith.constant 2816 : index
    %c2 = arith.constant 2 : index
    %element_type_f16 = hal.element_type<f16> : i32
    %dense_row_major = hal.encoding_type<dense_row_major> : i32
    hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major)
    %0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264}
    hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major)
    %1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960}
    %result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c10240} => !stream.timepoint
    %2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}, %result as %arg4: !stream.resource<external>{%c10240}) {
      stream.cmd.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 {
        ro %arg2[%c0 for %c11264] : !stream.resource<external>{%c11264},
        ro %arg3[%c0 for %c7208960] : !stream.resource<external>{%c7208960},
        wo %arg4[%c0 for %c10240] : !stream.resource<external>{%c10240}
      }
    } => !stream.timepoint
    %3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c10240}
    %4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view
    util.return %4 : !hal.buffer_view
  }
}
// -----// IR Dump After ElideTimepointsPass (iree-stream-elide-timepoints) //----- //
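// Note: this dump is identical to the previous one. With a single dispatch and a
// linear alloca -> execute -> await chain there appear to be no redundant
// timepoints for the pass to elide.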
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d0, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_hip
  stream.executable private @matmul_dispatch_0 {
    stream.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 workgroups() -> (index, index, index) {
      %x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice
      stream.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
        %cst = arith.constant 0.000000e+00 : f32
        %c0 = arith.constant 0 : index
        %0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>>
        %1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>>
        %2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>
        %3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16>
        %4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16>
        %5 = tensor.empty() : tensor<2x1280xf32>
        %6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
        %7 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%3, %4 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%6 : tensor<2x1280xf32>) {
        ^bb0(%in: f16, %in_0: f16, %out: f32):
          %8 = arith.extf %in : f16 to f32
          %9 = arith.extf %in_0 : f16 to f32
          %10 = arith.mulf %8, %9 : f32
          %11 = arith.addf %out, %10 : f32
          linalg.yield %11 : f32
        } -> tensor<2x1280xf32>
        iree_tensor_ext.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>
        return
      }
    }
  }
  util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
    %c0 = arith.constant 0 : index
    %c10240 = arith.constant 10240 : index
    %c7208960 = arith.constant 7208960 : index
    %c11264 = arith.constant 11264 : index
    %c1280 = arith.constant 1280 : index
    %c2816 = arith.constant 2816 : index
    %c2 = arith.constant 2 : index
    %element_type_f16 = hal.element_type<f16> : i32
    %dense_row_major = hal.encoding_type<dense_row_major> : i32
    hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major)
    %0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264}
    hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major)
    %1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960}
    %result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c10240} => !stream.timepoint
    %2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}, %result as %arg4: !stream.resource<external>{%c10240}) {
      stream.cmd.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 {
        ro %arg2[%c0 for %c11264] : !stream.resource<external>{%c11264},
        ro %arg3[%c0 for %c7208960] : !stream.resource<external>{%c7208960},
        wo %arg4[%c0 for %c10240] : !stream.resource<external>{%c10240}
      }
    } => !stream.timepoint
    %3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c10240}
    %4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view
    util.return %4 : !hal.buffer_view
  }
}
// -----// IR Dump After FixedPointIteratorPass (iree-util-fixed-point-iterator) //----- //
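// Note: the only change here is that the iree.fixedpoint.iteration attribute is
// dropped from the module; the fixed-point driver has converged on iteration 0.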
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d0, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_hip
  stream.executable private @matmul_dispatch_0 {
    stream.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 workgroups() -> (index, index, index) {
      %x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice
      stream.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
        %cst = arith.constant 0.000000e+00 : f32
        %c0 = arith.constant 0 : index
        %0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>>
        %1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>>
        %2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>
        %3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16>
        %4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16>
        %5 = tensor.empty() : tensor<2x1280xf32>
        %6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
        %7 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%3, %4 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%6 : tensor<2x1280xf32>) {
        ^bb0(%in: f16, %in_0: f16, %out: f32):
          %8 = arith.extf %in : f16 to f32
          %9 = arith.extf %in_0 : f16 to f32
          %10 = arith.mulf %8, %9 : f32
          %11 = arith.addf %out, %10 : f32
          linalg.yield %11 : f32
        } -> tensor<2x1280xf32>
        iree_tensor_ext.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>
        return
      }
    }
  }
  util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
    %c0 = arith.constant 0 : index
    %c10240 = arith.constant 10240 : index
    %c7208960 = arith.constant 7208960 : index
    %c11264 = arith.constant 11264 : index
    %c1280 = arith.constant 1280 : index
    %c2816 = arith.constant 2816 : index
    %c2 = arith.constant 2 : index
    %element_type_f16 = hal.element_type<f16> : i32
    %dense_row_major = hal.encoding_type<dense_row_major> : i32
    hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major)
    %0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264}
    hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major)
    %1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960}
    %result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c10240} => !stream.timepoint
    %2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}, %result as %arg4: !stream.resource<external>{%c10240}) {
      stream.cmd.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 {
        ro %arg2[%c0 for %c11264] : !stream.resource<external>{%c11264},
        ro %arg3[%c0 for %c7208960] : !stream.resource<external>{%c7208960},
        wo %arg4[%c0 for %c10240] : !stream.resource<external>{%c10240}
      }
    } => !stream.timepoint
    %3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c10240}
    %4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view
    util.return %4 : !hal.buffer_view
  }
}
// -----// IR Dump After FuseDispatchBindingsPass (iree-stream-fuse-dispatch-bindings) //----- //
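// Note: the dispatch function gains three index operands (%arg3-%arg5) carrying
// per-binding byte offsets, and each stream.binding.subspan now indexes with them
// instead of a constant 0, so differing offsets could be passed per call site.
// The single call site passes %c0 for all three; behavior is unchanged.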
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d0, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_hip
  stream.executable private @matmul_dispatch_0 {
    stream.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 workgroups() -> (index, index, index) {
      %x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice
      stream.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index, %arg4: index, %arg5: index) {
        %cst = arith.constant 0.000000e+00 : f32
        %c0 = arith.constant 0 : index
        %0 = stream.binding.subspan %arg0[%arg3] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>>
        %1 = stream.binding.subspan %arg1[%arg4] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>>
        %2 = stream.binding.subspan %arg2[%arg5] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>
        %3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16>
        %4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16>
        %5 = tensor.empty() : tensor<2x1280xf32>
        %6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
        %7 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%3, %4 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%6 : tensor<2x1280xf32>) {
        ^bb0(%in: f16, %in_0: f16, %out: f32):
          %8 = arith.extf %in : f16 to f32
          %9 = arith.extf %in_0 : f16 to f32
          %10 = arith.mulf %8, %9 : f32
          %11 = arith.addf %out, %10 : f32
          linalg.yield %11 : f32
        } -> tensor<2x1280xf32>
        iree_tensor_ext.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>
        return
      }
    }
  }
  util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
    %c0 = arith.constant 0 : index
    %c10240 = arith.constant 10240 : index
    %c7208960 = arith.constant 7208960 : index
    %c11264 = arith.constant 11264 : index
    %c1280 = arith.constant 1280 : index
    %c2816 = arith.constant 2816 : index
    %c2 = arith.constant 2 : index
    %element_type_f16 = hal.element_type<f16> : i32
    %dense_row_major = hal.encoding_type<dense_row_major> : i32
    hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major)
    %0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264}
    hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major)
    %1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960}
    %result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c10240} => !stream.timepoint
    %c0_0 = arith.constant 0 : index
    %2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}, %result as %arg4: !stream.resource<external>{%c10240}) {
      stream.cmd.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%c0, %c0, %c0 : index, index, index) {
        ro %arg2[%c0_0 for %c11264] : !stream.resource<external>{%c11264},
        ro %arg3[%c0_0 for %c7208960] : !stream.resource<external>{%c7208960},
        wo %arg4[%c0_0 for %c10240] : !stream.resource<external>{%c10240}
      }
    } => !stream.timepoint
    %3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c10240}
    %4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view
    util.return %4 : !hal.buffer_view
  }
}
// -----// IR Dump After AnnotateDispatchArgumentsPass (iree-stream-annotate-dispatch-arguments) //----- //
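// Note: the pass records what the call sites guarantee: each binding is annotated
// with stream.alignment = 64 and each offset operand with stream.values = [0 : index],
// the only value ever passed to it.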
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d0, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_hip
  stream.executable private @matmul_dispatch_0 {
    stream.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 workgroups() -> (index, index, index) {
      %x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice
      stream.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}, %arg3: index {stream.values = [0 : index]}, %arg4: index {stream.values = [0 : index]}, %arg5: index {stream.values = [0 : index]}) {
        %cst = arith.constant 0.000000e+00 : f32
        %c0 = arith.constant 0 : index
        %0 = stream.binding.subspan %arg0[%arg3] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>>
        %1 = stream.binding.subspan %arg1[%arg4] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>>
        %2 = stream.binding.subspan %arg2[%arg5] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>
        %3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16>
        %4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16>
        %5 = tensor.empty() : tensor<2x1280xf32>
        %6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
        %7 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%3, %4 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%6 : tensor<2x1280xf32>) {
        ^bb0(%in: f16, %in_0: f16, %out: f32):
          %8 = arith.extf %in : f16 to f32
          %9 = arith.extf %in_0 : f16 to f32
          %10 = arith.mulf %8, %9 : f32
          %11 = arith.addf %out, %10 : f32
          linalg.yield %11 : f32
        } -> tensor<2x1280xf32>
        iree_tensor_ext.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>
        return
      }
    }
  }
  util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
    %c0 = arith.constant 0 : index
    %c10240 = arith.constant 10240 : index
    %c7208960 = arith.constant 7208960 : index
    %c11264 = arith.constant 11264 : index
    %c1280 = arith.constant 1280 : index
    %c2816 = arith.constant 2816 : index
    %c2 = arith.constant 2 : index
    %element_type_f16 = hal.element_type<f16> : i32
    %dense_row_major = hal.encoding_type<dense_row_major> : i32
    hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major)
    %0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264}
    hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major)
    %1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960}
    %result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c10240} => !stream.timepoint
    %c0_0 = arith.constant 0 : index
    %2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}, %result as %arg4: !stream.resource<external>{%c10240}) {
      stream.cmd.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%c0, %c0, %c0 : index, index, index) {
        ro %arg2[%c0_0 for %c11264] : !stream.resource<external>{%c11264},
        ro %arg3[%c0_0 for %c7208960] : !stream.resource<external>{%c7208960},
        wo %arg4[%c0_0 for %c10240] : !stream.resource<external>{%c10240}
      }
    } => !stream.timepoint
    %3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c10240}
    %4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view
    util.return %4 : !hal.buffer_view
  }
}
// -----// IR Dump After AnnotateDispatchAssumptionsPass (iree-stream-annotate-dispatch-assumptions) //----- //
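// Note: the operand annotations are now materialized inside the dispatch as a
// util.assume.int op pinning each offset to the range [0, 0], which lets later
// passes treat the offsets as the constant 0.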
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d0, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_hip
  stream.executable private @matmul_dispatch_0 {
    stream.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 workgroups() -> (index, index, index) {
      %x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice
      stream.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}, %arg3: index {stream.values = [0 : index]}, %arg4: index {stream.values = [0 : index]}, %arg5: index {stream.values = [0 : index]}) {
        %0:3 = util.assume.int
            %arg3<umin = 0, umax = 0>,
            %arg4<umin = 0, umax = 0>,
            %arg5<umin = 0, umax = 0>
          : index, index, index
        %cst = arith.constant 0.000000e+00 : f32
        %c0 = arith.constant 0 : index
        %1 = stream.binding.subspan %arg0[%0#0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>>
        %2 = stream.binding.subspan %arg1[%0#1] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>>
        %3 = stream.binding.subspan %arg2[%0#2] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>
        %4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16>
        %5 = iree_tensor_ext.dispatch.tensor.load %2, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16>
        %6 = tensor.empty() : tensor<2x1280xf32>
        %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
        %8 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%4, %5 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%7 : tensor<2x1280xf32>) {
        ^bb0(%in: f16, %in_0: f16, %out: f32):
          %9 = arith.extf %in : f16 to f32
          %10 = arith.extf %in_0 : f16 to f32
          %11 = arith.mulf %9, %10 : f32
          %12 = arith.addf %out, %11 : f32
          linalg.yield %12 : f32
        } -> tensor<2x1280xf32>
        iree_tensor_ext.dispatch.tensor.store %8, %3, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>
        return
      }
    }
  }
  util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
    %c0 = arith.constant 0 : index
    %c10240 = arith.constant 10240 : index
    %c7208960 = arith.constant 7208960 : index
    %c11264 = arith.constant 11264 : index
    %c1280 = arith.constant 1280 : index
    %c2816 = arith.constant 2816 : index
    %c2 = arith.constant 2 : index
    %element_type_f16 = hal.element_type<f16> : i32
    %dense_row_major = hal.encoding_type<dense_row_major> : i32
    hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major)
    %0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264}
    hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major)
    %1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960}
    %result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c10240} => !stream.timepoint
    %c0_0 = arith.constant 0 : index
    %2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}, %result as %arg4: !stream.resource<external>{%c10240}) {
      stream.cmd.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%c0, %c0, %c0 : index, index, index) {
        ro %arg2[%c0_0 for %c11264] : !stream.resource<external>{%c11264},
        ro %arg3[%c0_0 for %c7208960] : !stream.resource<external>{%c7208960},
        wo %arg4[%c0_0 for %c10240] : !stream.resource<external>{%c10240}
      }
    } => !stream.timepoint
    %3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c10240}
    %4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view
    util.return %4 : !hal.buffer_view
  }
}
// -----// IR Dump After PackDispatchOperandsPass (iree-stream-pack-dispatch-operands) //----- //
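// Note: every index operand is split into two i32 words for the dispatch ABI, so
// the three offsets become six i32 operands. Inside the dispatch each value is
// rebuilt as lo | (hi << 32) with extui/shli/ori and cast back to index; the call
// site now passes six i32 zeros.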
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d0, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_hip
  stream.executable private @matmul_dispatch_0 {
    stream.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 workgroups() -> (index, index, index) {
      %x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice
      stream.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}, %arg3: i32, %arg4: i32, %arg5: i32, %arg6: i32, %arg7: i32, %arg8: i32) {
        %0 = arith.extui %arg3 : i32 to i64
        %1 = arith.extui %arg4 : i32 to i64
        %c32_i64 = arith.constant 32 : i64
        %2 = arith.shli %1, %c32_i64 : i64
        %3 = arith.ori %0, %2 : i64
        %4 = arith.index_castui %3 {stream.values = [0 : index]} : i64 to index
        %5 = arith.extui %arg5 : i32 to i64
        %6 = arith.extui %arg6 : i32 to i64
        %c32_i64_0 = arith.constant 32 : i64
        %7 = arith.shli %6, %c32_i64_0 : i64
        %8 = arith.ori %5, %7 : i64
        %9 = arith.index_castui %8 {stream.values = [0 : index]} : i64 to index
        %10 = arith.extui %arg7 : i32 to i64
        %11 = arith.extui %arg8 : i32 to i64
        %c32_i64_1 = arith.constant 32 : i64
        %12 = arith.shli %11, %c32_i64_1 : i64
        %13 = arith.ori %10, %12 : i64
        %14 = arith.index_castui %13 {stream.values = [0 : index]} : i64 to index
        %15:3 = util.assume.int
            %4<umin = 0, umax = 0>,
            %9<umin = 0, umax = 0>,
            %14<umin = 0, umax = 0>
          : index, index, index
        %cst = arith.constant 0.000000e+00 : f32
        %c0 = arith.constant 0 : index
        %16 = stream.binding.subspan %arg0[%15#0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>>
        %17 = stream.binding.subspan %arg1[%15#1] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>>
        %18 = stream.binding.subspan %arg2[%15#2] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>
        %19 = iree_tensor_ext.dispatch.tensor.load %16, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16>
        %20 = iree_tensor_ext.dispatch.tensor.load %17, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16>
        %21 = tensor.empty() : tensor<2x1280xf32>
        %22 = linalg.fill ins(%cst : f32) outs(%21 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
        %23 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%19, %20 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%22 : tensor<2x1280xf32>) {
        ^bb0(%in: f16, %in_2: f16, %out: f32):
          %24 = arith.extf %in : f16 to f32
          %25 = arith.extf %in_2 : f16 to f32
          %26 = arith.mulf %24, %25 : f32
          %27 = arith.addf %out, %26 : f32
          linalg.yield %27 : f32
        } -> tensor<2x1280xf32>
        iree_tensor_ext.dispatch.tensor.store %23, %18, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>
        return
      }
    }
  }
  util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
    %c0 = arith.constant 0 : index
    %c10240 = arith.constant 10240 : index
    %c7208960 = arith.constant 7208960 : index
    %c11264 = arith.constant 11264 : index
    %c1280 = arith.constant 1280 : index
    %c2816 = arith.constant 2816 : index
    %c2 = arith.constant 2 : index
    %element_type_f16 = hal.element_type<f16> : i32
    %dense_row_major = hal.encoding_type<dense_row_major> : i32
    hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major)
    %0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264}
    hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major)
    %1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960}
    %result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c10240} => !stream.timepoint
    %c0_0 = arith.constant 0 : index
    %c0_i64 = arith.constant 0 : i64
    %c0_i32 = arith.constant 0 : i32
    %c32_i64 = arith.constant 32 : i64
    %c0_i64_1 = arith.constant 0 : i64
    %c0_i32_2 = arith.constant 0 : i32
    %c0_i64_3 = arith.constant 0 : i64
    %c0_i32_4 = arith.constant 0 : i32
    %c32_i64_5 = arith.constant 32 : i64
    %c0_i64_6 = arith.constant 0 : i64
    %c0_i32_7 = arith.constant 0 : i32
    %c0_i64_8 = arith.constant 0 : i64
    %c0_i32_9 = arith.constant 0 : i32
    %c32_i64_10 = arith.constant 32 : i64
    %c0_i64_11 = arith.constant 0 : i64
    %c0_i32_12 = arith.constant 0 : i32
    %2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}, %result as %arg4: !stream.resource<external>{%c10240}) {
      stream.cmd.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%c0_i32, %c0_i32_2, %c0_i32_4, %c0_i32_7, %c0_i32_9, %c0_i32_12 : i32, i32, i32, i32, i32, i32) {
        ro %arg2[%c0_0 for %c11264] : !stream.resource<external>{%c11264},
        ro %arg3[%c0_0 for %c7208960] : !stream.resource<external>{%c7208960},
        wo %arg4[%c0_0 for %c10240] : !stream.resource<external>{%c10240}
      }
    } => !stream.timepoint
    %3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c10240}
    %4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view
    util.return %4 : !hal.buffer_view
  }
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
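// Note: the following dumps are printed at function scope rather than module scope.
// Canonicalization folds the dead packing constants in @matmul and dedupes the six
// zero operands into a single shared %c0_i32.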
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
  %c0_i32 = arith.constant 0 : i32
  %c0 = arith.constant 0 : index
  %c10240 = arith.constant 10240 : index
  %c7208960 = arith.constant 7208960 : index
  %c11264 = arith.constant 11264 : index
  %c1280 = arith.constant 1280 : index
  %c2816 = arith.constant 2816 : index
  %c2 = arith.constant 2 : index
  %element_type_f16 = hal.element_type<f16> : i32
  %dense_row_major = hal.encoding_type<dense_row_major> : i32
  hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major)
  %0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264}
  hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major)
  %1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960}
  %result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c10240} => !stream.timepoint
  %2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}, %result as %arg4: !stream.resource<external>{%c10240}) {
    stream.cmd.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32 : i32, i32, i32, i32, i32, i32) {
      ro %arg2[%c0 for %c11264] : !stream.resource<external>{%c11264},
      ro %arg3[%c0 for %c7208960] : !stream.resource<external>{%c7208960},
      wo %arg4[%c0 for %c10240] : !stream.resource<external>{%c10240}
    }
  } => !stream.timepoint
  %3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c10240}
  %4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view
  util.return %4 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
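// Note: CSE makes no further changes; the function is identical to the
// canonicalized form above.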
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
  %c0_i32 = arith.constant 0 : i32
  %c0 = arith.constant 0 : index
  %c10240 = arith.constant 10240 : index
  %c7208960 = arith.constant 7208960 : index
  %c11264 = arith.constant 11264 : index
  %c1280 = arith.constant 1280 : index
  %c2816 = arith.constant 2816 : index
  %c2 = arith.constant 2 : index
  %element_type_f16 = hal.element_type<f16> : i32
  %dense_row_major = hal.encoding_type<dense_row_major> : i32
  hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major)
  %0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264}
  hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major)
  %1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960}
  %result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c10240} => !stream.timepoint
  %2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}, %result as %arg4: !stream.resource<external>{%c10240}) {
    stream.cmd.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32 : i32, i32, i32, i32, i32, i32) {
      ro %arg2[%c0 for %c11264] : !stream.resource<external>{%c11264},
      ro %arg3[%c0 for %c7208960] : !stream.resource<external>{%c7208960},
      wo %arg4[%c0 for %c10240] : !stream.resource<external>{%c10240}
    }
  } => !stream.timepoint
  %3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c10240}
  %4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view
  util.return %4 : !hal.buffer_view
}
// -----// IR Dump After OptimizeIntArithmeticPass (iree-util-optimize-int-arithmetic) //----- //
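// Note: no change from the previous dump; only constants remain at this level, so
// there is no integer arithmetic left to optimize.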
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
  %c0_i32 = arith.constant 0 : i32
  %c0 = arith.constant 0 : index
  %c10240 = arith.constant 10240 : index
  %c7208960 = arith.constant 7208960 : index
  %c11264 = arith.constant 11264 : index
  %c1280 = arith.constant 1280 : index
  %c2816 = arith.constant 2816 : index
  %c2 = arith.constant 2 : index
  %element_type_f16 = hal.element_type<f16> : i32
  %dense_row_major = hal.encoding_type<dense_row_major> : i32
  hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major)
  %0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264}
  hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major)
  %1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960}
  %result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c10240} => !stream.timepoint
  %2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}, %result as %arg4: !stream.resource<external>{%c10240}) {
    stream.cmd.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32 : i32, i32, i32, i32, i32, i32) {
      ro %arg2[%c0 for %c11264] : !stream.resource<external>{%c11264},
      ro %arg3[%c0 for %c7208960] : !stream.resource<external>{%c7208960},
      wo %arg4[%c0 for %c10240] : !stream.resource<external>{%c10240}
    }
  } => !stream.timepoint
  %3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c10240}
  %4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view
  util.return %4 : !hal.buffer_view
}
// -----// IR Dump After SimplifyGlobalAccessesPass (iree-util-simplify-global-accesses) //----- //
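// Note: no change; the function performs no explicit util.global loads or stores
// (the device global is only referenced through affinity attributes).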
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
  %c0_i32 = arith.constant 0 : i32
  %c0 = arith.constant 0 : index
  %c10240 = arith.constant 10240 : index
  %c7208960 = arith.constant 7208960 : index
  %c11264 = arith.constant 11264 : index
  %c1280 = arith.constant 1280 : index
  %c2816 = arith.constant 2816 : index
  %c2 = arith.constant 2 : index
  %element_type_f16 = hal.element_type<f16> : i32
  %dense_row_major = hal.encoding_type<dense_row_major> : i32
  hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major)
  %0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264}
  hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major)
  %1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960}
  %result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c10240} => !stream.timepoint
  %2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}, %result as %arg4: !stream.resource<external>{%c10240}) {
    stream.cmd.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32 : i32, i32, i32, i32, i32, i32) {
      ro %arg2[%c0 for %c11264] : !stream.resource<external>{%c11264},
      ro %arg3[%c0 for %c7208960] : !stream.resource<external>{%c7208960},
      wo %arg4[%c0 for %c10240] : !stream.resource<external>{%c10240}
    }
  } => !stream.timepoint
  %3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c10240}
  %4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view
  util.return %4 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatternsPass (iree-util-apply-patterns) //----- //
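// Note: no applicable patterns fire here; the function is unchanged.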
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
  %c0_i32 = arith.constant 0 : i32
  %c0 = arith.constant 0 : index
  %c10240 = arith.constant 10240 : index
  %c7208960 = arith.constant 7208960 : index
  %c11264 = arith.constant 11264 : index
  %c1280 = arith.constant 1280 : index
  %c2816 = arith.constant 2816 : index
  %c2 = arith.constant 2 : index
  %element_type_f16 = hal.element_type<f16> : i32
  %dense_row_major = hal.encoding_type<dense_row_major> : i32
  hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major)
  %0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264}
  hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major)
  %1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960}
  %result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c10240} => !stream.timepoint
  %2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}, %result as %arg4: !stream.resource<external>{%c10240}) {
    stream.cmd.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32 : i32, i32, i32, i32, i32, i32) {
      ro %arg2[%c0 for %c11264] : !stream.resource<external>{%c11264},
      ro %arg3[%c0 for %c7208960] : !stream.resource<external>{%c7208960},
      wo %arg4[%c0 for %c10240] : !stream.resource<external>{%c10240}
    }
  } => !stream.timepoint
  %3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c10240}
  %4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view
  util.return %4 : !hal.buffer_view
}
// -----// IR Dump After FoldGlobalsPass (iree-util-fold-globals) //----- //
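// Note: back to a whole-module dump. @__device_0 is not folded away because it is
// still referenced by the device affinities; the dispatch body now shows the
// canonicalized operand-packing code with a single shared %c32_i64 constant.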
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>
#map = affine_map<(d0, d1, d2) -> (d0, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_hip
  stream.executable private @matmul_dispatch_0 {
    stream.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 workgroups() -> (index, index, index) {
      %x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice
      stream.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}, %arg3: i32, %arg4: i32, %arg5: i32, %arg6: i32, %arg7: i32, %arg8: i32) {
        %cst = arith.constant 0.000000e+00 : f32
        %c32_i64 = arith.constant 32 : i64
        %0 = arith.extui %arg3 : i32 to i64
        %1 = arith.extui %arg4 : i32 to i64
        %2 = arith.shli %1, %c32_i64 : i64
        %3 = arith.ori %0, %2 : i64
        %4 = arith.index_castui %3 {stream.values = [0 : index]} : i64 to index
        %5 = arith.extui %arg5 : i32 to i64
        %6 = arith.extui %arg6 : i32 to i64
        %7 = arith.shli %6, %c32_i64 : i64
        %8 = arith.ori %5, %7 : i64
        %9 = arith.index_castui %8 {stream.values = [0 : index]} : i64 to index
        %10 = arith.extui %arg7 : i32 to i64
        %11 = arith.extui %arg8 : i32 to i64
        %12 = arith.shli %11, %c32_i64 : i64
        %13 = arith.ori %10, %12 : i64
        %14 = arith.index_castui %13 {stream.values = [0 : index]} : i64 to index
        %15:3 = util.assume.int
            %4<umin = 0, umax = 0>,
            %9<umin = 0, umax = 0>,
            %14<umin = 0, umax = 0>
          : index, index, index
        %16 = stream.binding.subspan %arg0[%15#0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>>
        %17 = stream.binding.subspan %arg1[%15#1] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>>
        %18 = stream.binding.subspan %arg2[%15#2] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>
        %19 = iree_tensor_ext.dispatch.tensor.load %16, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16>
        %20 = iree_tensor_ext.dispatch.tensor.load %17, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16>
        %21 = tensor.empty() : tensor<2x1280xf32>
        %22 = linalg.fill ins(%cst : f32) outs(%21 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
        %23 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%19, %20 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%22 : tensor<2x1280xf32>) {
        ^bb0(%in: f16, %in_0: f16, %out: f32):
          %24 = arith.extf %in : f16 to f32
          %25 = arith.extf %in_0 : f16 to f32
          %26 = arith.mulf %24, %25 : f32
          %27 = arith.addf %out, %26 : f32
          linalg.yield %27 : f32
        } -> tensor<2x1280xf32>
        iree_tensor_ext.dispatch.tensor.store %23, %18, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>
        return
      }
    }
  }
  util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
    %c0_i32 = arith.constant 0 : i32
    %c0 = arith.constant 0 : index
    %c10240 = arith.constant 10240 : index
    %c7208960 = arith.constant 7208960 : index
    %c11264 = arith.constant 11264 : index
    %c1280 = arith.constant 1280 : index
    %c2816 = arith.constant 2816 : index
    %c2 = arith.constant 2 : index
    %element_type_f16 = hal.element_type<f16> : i32
    %dense_row_major = hal.encoding_type<dense_row_major> : i32
    hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major)
    %0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264}
    hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major)
    %1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960}
    %result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c10240} => !stream.timepoint
    %2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}, %result as %arg4: !stream.resource<external>{%c10240}) {
      stream.cmd.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32 : i32, i32, i32, i32, i32, i32) {
        ro %arg2[%c0 for %c11264] : !stream.resource<external>{%c11264},
ro %arg3[%c0 for %c7208960] : !stream.resource<external>{%c7208960}, | |
wo %arg4[%c0 for %c10240] : !stream.resource<external>{%c10240} | |
} | |
} => !stream.timepoint | |
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c10240} | |
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After FuseGlobalsPass (iree-util-fuse-globals) //----- // | |
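// note: iree-util-fuse-globals merges globals that provably always hold | |
// the same value; with a single global there is nothing to fuse, and the | |
// dump below is identical to the previous one. | |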
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_hip | |
stream.executable private @matmul_dispatch_0 { | |
stream.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}, %arg3: i32, %arg4: i32, %arg5: i32, %arg6: i32, %arg7: i32, %arg8: i32) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c32_i64 = arith.constant 32 : i64 | |
%0 = arith.extui %arg3 : i32 to i64 | |
%1 = arith.extui %arg4 : i32 to i64 | |
%2 = arith.shli %1, %c32_i64 : i64 | |
%3 = arith.ori %0, %2 : i64 | |
%4 = arith.index_castui %3 {stream.values = [0 : index]} : i64 to index | |
%5 = arith.extui %arg5 : i32 to i64 | |
%6 = arith.extui %arg6 : i32 to i64 | |
%7 = arith.shli %6, %c32_i64 : i64 | |
%8 = arith.ori %5, %7 : i64 | |
%9 = arith.index_castui %8 {stream.values = [0 : index]} : i64 to index | |
%10 = arith.extui %arg7 : i32 to i64 | |
%11 = arith.extui %arg8 : i32 to i64 | |
%12 = arith.shli %11, %c32_i64 : i64 | |
%13 = arith.ori %10, %12 : i64 | |
%14 = arith.index_castui %13 {stream.values = [0 : index]} : i64 to index | |
%15:3 = util.assume.int | |
%4<umin = 0, umax = 0>, | |
%9<umin = 0, umax = 0>, | |
%14<umin = 0, umax = 0> | |
: index, index, index | |
%16 = stream.binding.subspan %arg0[%15#0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> | |
%17 = stream.binding.subspan %arg1[%15#1] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> | |
%18 = stream.binding.subspan %arg2[%15#2] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
%19 = iree_tensor_ext.dispatch.tensor.load %16, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%20 = iree_tensor_ext.dispatch.tensor.load %17, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%21 = tensor.empty() : tensor<2x1280xf32> | |
%22 = linalg.fill ins(%cst : f32) outs(%21 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%23 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%19, %20 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%22 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%24 = arith.extf %in : f16 to f32 | |
%25 = arith.extf %in_0 : f16 to f32 | |
%26 = arith.mulf %24, %25 : f32 | |
%27 = arith.addf %out, %26 : f32 | |
linalg.yield %27 : f32 | |
} -> tensor<2x1280xf32> | |
iree_tensor_ext.dispatch.tensor.store %23, %18, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
} | |
} | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0_i32 = arith.constant 0 : i32 | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c10240} => !stream.timepoint | |
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}, %result as %arg4: !stream.resource<external>{%c10240}) { | |
stream.cmd.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32 : i32, i32, i32, i32, i32, i32) { | |
ro %arg2[%c0 for %c11264] : !stream.resource<external>{%c11264}, | |
ro %arg3[%c0 for %c7208960] : !stream.resource<external>{%c7208960}, | |
wo %arg4[%c0 for %c10240] : !stream.resource<external>{%c10240} | |
} | |
} => !stream.timepoint | |
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c10240} | |
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After IPOPass (iree-util-ipo) //----- // | |
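// note: iree-util-ipo propagates constants and drops unused arguments | |
// across internal util.func call edges; @matmul is the only function and | |
// has no internal callees, so no change is visible here. | |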
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_hip | |
stream.executable private @matmul_dispatch_0 { | |
stream.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}, %arg3: i32, %arg4: i32, %arg5: i32, %arg6: i32, %arg7: i32, %arg8: i32) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c32_i64 = arith.constant 32 : i64 | |
%0 = arith.extui %arg3 : i32 to i64 | |
%1 = arith.extui %arg4 : i32 to i64 | |
%2 = arith.shli %1, %c32_i64 : i64 | |
%3 = arith.ori %0, %2 : i64 | |
%4 = arith.index_castui %3 {stream.values = [0 : index]} : i64 to index | |
%5 = arith.extui %arg5 : i32 to i64 | |
%6 = arith.extui %arg6 : i32 to i64 | |
%7 = arith.shli %6, %c32_i64 : i64 | |
%8 = arith.ori %5, %7 : i64 | |
%9 = arith.index_castui %8 {stream.values = [0 : index]} : i64 to index | |
%10 = arith.extui %arg7 : i32 to i64 | |
%11 = arith.extui %arg8 : i32 to i64 | |
%12 = arith.shli %11, %c32_i64 : i64 | |
%13 = arith.ori %10, %12 : i64 | |
%14 = arith.index_castui %13 {stream.values = [0 : index]} : i64 to index | |
%15:3 = util.assume.int | |
%4<umin = 0, umax = 0>, | |
%9<umin = 0, umax = 0>, | |
%14<umin = 0, umax = 0> | |
: index, index, index | |
%16 = stream.binding.subspan %arg0[%15#0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> | |
%17 = stream.binding.subspan %arg1[%15#1] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> | |
%18 = stream.binding.subspan %arg2[%15#2] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
%19 = iree_tensor_ext.dispatch.tensor.load %16, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%20 = iree_tensor_ext.dispatch.tensor.load %17, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%21 = tensor.empty() : tensor<2x1280xf32> | |
%22 = linalg.fill ins(%cst : f32) outs(%21 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%23 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%19, %20 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%22 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%24 = arith.extf %in : f16 to f32 | |
%25 = arith.extf %in_0 : f16 to f32 | |
%26 = arith.mulf %24, %25 : f32 | |
%27 = arith.addf %out, %26 : f32 | |
linalg.yield %27 : f32 | |
} -> tensor<2x1280xf32> | |
iree_tensor_ext.dispatch.tensor.store %23, %18, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
} | |
} | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0_i32 = arith.constant 0 : i32 | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c10240} => !stream.timepoint | |
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}, %result as %arg4: !stream.resource<external>{%c10240}) { | |
stream.cmd.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32 : i32, i32, i32, i32, i32, i32) { | |
ro %arg2[%c0 for %c11264] : !stream.resource<external>{%c11264}, | |
ro %arg3[%c0 for %c7208960] : !stream.resource<external>{%c7208960}, | |
wo %arg4[%c0 for %c10240] : !stream.resource<external>{%c10240} | |
} | |
} => !stream.timepoint | |
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c10240} | |
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After FoldUniformOperandsPass (iree-stream-fold-uniform-operands) //----- // | |
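// note: this is the first pass in this stretch that changes the IR. All | |
// six i32 push-constant operands are %c0_i32 at the single dispatch site, | |
// so iree-stream-fold-uniform-operands inlines them into the executable: | |
// the func.func signature below drops %arg3..%arg8, and the | |
// stream.cmd.dispatch op loses its operand list. | |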
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_hip | |
stream.executable private @matmul_dispatch_0 { | |
stream.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}) { | |
%c0_i32 = arith.constant 0 : i32 | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c32_i64 = arith.constant 32 : i64 | |
%0 = arith.extui %c0_i32 : i32 to i64 | |
%1 = arith.extui %c0_i32 : i32 to i64 | |
%2 = arith.shli %1, %c32_i64 : i64 | |
%3 = arith.ori %0, %2 : i64 | |
%4 = arith.index_castui %3 {stream.values = [0 : index]} : i64 to index | |
%5 = arith.extui %c0_i32 : i32 to i64 | |
%6 = arith.extui %c0_i32 : i32 to i64 | |
%7 = arith.shli %6, %c32_i64 : i64 | |
%8 = arith.ori %5, %7 : i64 | |
%9 = arith.index_castui %8 {stream.values = [0 : index]} : i64 to index | |
%10 = arith.extui %c0_i32 : i32 to i64 | |
%11 = arith.extui %c0_i32 : i32 to i64 | |
%12 = arith.shli %11, %c32_i64 : i64 | |
%13 = arith.ori %10, %12 : i64 | |
%14 = arith.index_castui %13 {stream.values = [0 : index]} : i64 to index | |
%15:3 = util.assume.int | |
%4<umin = 0, umax = 0>, | |
%9<umin = 0, umax = 0>, | |
%14<umin = 0, umax = 0> | |
: index, index, index | |
%16 = stream.binding.subspan %arg0[%15#0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> | |
%17 = stream.binding.subspan %arg1[%15#1] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> | |
%18 = stream.binding.subspan %arg2[%15#2] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
%19 = iree_tensor_ext.dispatch.tensor.load %16, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%20 = iree_tensor_ext.dispatch.tensor.load %17, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%21 = tensor.empty() : tensor<2x1280xf32> | |
%22 = linalg.fill ins(%cst : f32) outs(%21 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%23 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%19, %20 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%22 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%24 = arith.extf %in : f16 to f32 | |
%25 = arith.extf %in_0 : f16 to f32 | |
%26 = arith.mulf %24, %25 : f32 | |
%27 = arith.addf %out, %26 : f32 | |
linalg.yield %27 : f32 | |
} -> tensor<2x1280xf32> | |
iree_tensor_ext.dispatch.tensor.store %23, %18, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
} | |
} | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0_i32 = arith.constant 0 : i32 | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c10240} => !stream.timepoint | |
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}, %result as %arg4: !stream.resource<external>{%c10240}) { | |
stream.cmd.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 { | |
ro %arg2[%c0 for %c11264] : !stream.resource<external>{%c11264}, | |
ro %arg3[%c0 for %c7208960] : !stream.resource<external>{%c7208960}, | |
wo %arg4[%c0 for %c10240] : !stream.resource<external>{%c10240} | |
} | |
} => !stream.timepoint | |
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c10240} | |
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
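// note: a per-function dump (only @matmul is printed). Canonicalization | |
// drops the now-dead %c0_i32 constant here; the matching cleanup inside | |
// the dispatch function (the extui/shli/ori/index_castui offset chain and | |
// util.assume.int folding down to a plain %c0) becomes visible in the | |
// next whole-module dump below. | |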
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c10240} => !stream.timepoint | |
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}, %result as %arg4: !stream.resource<external>{%c10240}) { | |
stream.cmd.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 { | |
ro %arg2[%c0 for %c11264] : !stream.resource<external>{%c11264}, | |
ro %arg3[%c0 for %c7208960] : !stream.resource<external>{%c7208960}, | |
wo %arg4[%c0 for %c10240] : !stream.resource<external>{%c10240} | |
} | |
} => !stream.timepoint | |
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c10240} | |
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
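// note: common-subexpression elimination finds no remaining duplicate | |
// ops; the function is unchanged. | |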
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c10240} => !stream.timepoint | |
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}, %result as %arg4: !stream.resource<external>{%c10240}) { | |
stream.cmd.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 { | |
ro %arg2[%c0 for %c11264] : !stream.resource<external>{%c11264}, | |
ro %arg3[%c0 for %c7208960] : !stream.resource<external>{%c7208960}, | |
wo %arg4[%c0 for %c10240] : !stream.resource<external>{%c10240} | |
} | |
} => !stream.timepoint | |
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c10240} | |
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
// -----// IR Dump After OptimizeIntArithmeticPass (iree-util-optimize-int-arithmetic) //----- // | |
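// note: iree-util-optimize-int-arithmetic simplifies index math using | |
// integer range analysis; the constants here are already in minimal form, | |
// so the function is unchanged. | |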
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c10240} => !stream.timepoint | |
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}, %result as %arg4: !stream.resource<external>{%c10240}) { | |
stream.cmd.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 { | |
ro %arg2[%c0 for %c11264] : !stream.resource<external>{%c11264}, | |
ro %arg3[%c0 for %c7208960] : !stream.resource<external>{%c7208960}, | |
wo %arg4[%c0 for %c10240] : !stream.resource<external>{%c10240} | |
} | |
} => !stream.timepoint | |
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c10240} | |
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
// -----// IR Dump After SimplifyGlobalAccessesPass (iree-util-simplify-global-accesses) //----- // | |
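// note: this pass hoists and deduplicates util.global loads/stores within | |
// a function; @matmul touches @__device_0 only via affinity attributes, | |
// so there is nothing to simplify. | |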
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c10240} => !stream.timepoint | |
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}, %result as %arg4: !stream.resource<external>{%c10240}) { | |
stream.cmd.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 { | |
ro %arg2[%c0 for %c11264] : !stream.resource<external>{%c11264}, | |
ro %arg3[%c0 for %c7208960] : !stream.resource<external>{%c7208960}, | |
wo %arg4[%c0 for %c10240] : !stream.resource<external>{%c10240} | |
} | |
} => !stream.timepoint | |
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c10240} | |
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
// -----// IR Dump After ApplyPatternsPass (iree-util-apply-patterns) //----- // | |
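// note: the util-dialect cleanup patterns find nothing further to | |
// rewrite; the function is unchanged. | |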
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c10240} => !stream.timepoint | |
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}, %result as %arg4: !stream.resource<external>{%c10240}) { | |
stream.cmd.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 { | |
ro %arg2[%c0 for %c11264] : !stream.resource<external>{%c11264}, | |
ro %arg3[%c0 for %c7208960] : !stream.resource<external>{%c7208960}, | |
wo %arg4[%c0 for %c10240] : !stream.resource<external>{%c10240} | |
} | |
} => !stream.timepoint | |
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c10240} | |
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
// -----// IR Dump After FoldGlobalsPass (iree-util-fold-globals) //----- // | |
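// note: the pass itself changes nothing, but this is the first | |
// whole-module dump since canonicalization: the dispatch function is now | |
// minimal, with all three stream.binding.subspan ops at constant offset | |
// %c0 and the earlier push-constant decoding preamble gone. | |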
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_hip | |
stream.executable private @matmul_dispatch_0 { | |
stream.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
%3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%5 = tensor.empty() : tensor<2x1280xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%7 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%3, %4 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%6 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%8 = arith.extf %in : f16 to f32 | |
%9 = arith.extf %in_0 : f16 to f32 | |
%10 = arith.mulf %8, %9 : f32 | |
%11 = arith.addf %out, %10 : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<2x1280xf32> | |
iree_tensor_ext.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
} | |
} | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c10240} => !stream.timepoint | |
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}, %result as %arg4: !stream.resource<external>{%c10240}) { | |
stream.cmd.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 { | |
ro %arg2[%c0 for %c11264] : !stream.resource<external>{%c11264}, | |
ro %arg3[%c0 for %c7208960] : !stream.resource<external>{%c7208960}, | |
wo %arg4[%c0 for %c10240] : !stream.resource<external>{%c10240} | |
} | |
} => !stream.timepoint | |
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c10240} | |
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After FuseGlobalsPass (iree-util-fuse-globals) //----- // | |
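// note: still a single global; nothing to fuse, module unchanged. | |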
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_hip | |
stream.executable private @matmul_dispatch_0 { | |
stream.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
%3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%5 = tensor.empty() : tensor<2x1280xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%7 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%3, %4 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%6 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%8 = arith.extf %in : f16 to f32 | |
%9 = arith.extf %in_0 : f16 to f32 | |
%10 = arith.mulf %8, %9 : f32 | |
%11 = arith.addf %out, %10 : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<2x1280xf32> | |
iree_tensor_ext.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
} | |
} | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c10240} => !stream.timepoint | |
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}, %result as %arg4: !stream.resource<external>{%c10240}) { | |
stream.cmd.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 { | |
ro %arg2[%c0 for %c11264] : !stream.resource<external>{%c11264}, | |
ro %arg3[%c0 for %c7208960] : !stream.resource<external>{%c7208960}, | |
wo %arg4[%c0 for %c10240] : !stream.resource<external>{%c10240} | |
} | |
} => !stream.timepoint | |
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c10240} | |
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After IPOPass (iree-util-ipo) //----- // | |
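// note: no internal call edges to optimize; module unchanged. | |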
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_hip | |
stream.executable private @matmul_dispatch_0 { | |
stream.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
%3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%5 = tensor.empty() : tensor<2x1280xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%7 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%3, %4 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%6 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%8 = arith.extf %in : f16 to f32 | |
%9 = arith.extf %in_0 : f16 to f32 | |
%10 = arith.mulf %8, %9 : f32 | |
%11 = arith.addf %out, %10 : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<2x1280xf32> | |
iree_tensor_ext.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
} | |
} | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c10240} => !stream.timepoint | |
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}, %result as %arg4: !stream.resource<external>{%c10240}) { | |
stream.cmd.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 { | |
ro %arg2[%c0 for %c11264] : !stream.resource<external>{%c11264}, | |
ro %arg3[%c0 for %c7208960] : !stream.resource<external>{%c7208960}, | |
wo %arg4[%c0 for %c10240] : !stream.resource<external>{%c10240} | |
} | |
} => !stream.timepoint | |
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c10240} | |
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After SymbolDCE (symbol-dce) //----- // | |
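// note: symbol-dce erases symbols unreachable from public roots. The | |
// public @matmul references @matmul_dispatch_0 and (via affinity) | |
// @__device_0, so every symbol survives and the module is unchanged. | |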
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_hip | |
stream.executable private @matmul_dispatch_0 { | |
stream.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
%3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%5 = tensor.empty() : tensor<2x1280xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%7 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%3, %4 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%6 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%8 = arith.extf %in : f16 to f32 | |
%9 = arith.extf %in_0 : f16 to f32 | |
%10 = arith.mulf %8, %9 : f32 | |
%11 = arith.addf %out, %10 : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<2x1280xf32> | |
iree_tensor_ext.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
} | |
} | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c10240} => !stream.timepoint | |
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}, %result as %arg4: !stream.resource<external>{%c10240}) { | |
stream.cmd.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 { | |
ro %arg2[%c0 for %c11264] : !stream.resource<external>{%c11264}, | |
ro %arg3[%c0 for %c7208960] : !stream.resource<external>{%c7208960}, | |
wo %arg4[%c0 for %c10240] : !stream.resource<external>{%c10240} | |
} | |
} => !stream.timepoint | |
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c10240} | |
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
} | |
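// The byte-size constants threaded through the host function come straight from
// the tensor shapes (f16 = 2 bytes, f32 = 4 bytes). A quick check in Python:

# input0: 2x2816 f16, input1: 2816x1280 f16, output: 2x1280 f32
assert 2 * 2816 * 2 == 11264         # %c11264, input0 resource size
assert 2816 * 1280 * 2 == 7208960    # %c7208960, input1 resource size
assert 2 * 1280 * 4 == 10240         # %c10240, output resource size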
// -----// IR Dump After AssignTargetDevicesPass (iree-hal-assign-target-devices) //----- //
// (no changes to the module; identical to the SymbolDCE dump above)
// -----// IR Dump After MaterializeTargetDevicesPass (iree-hal-materialize-target-devices) //----- //
// (no changes to the module)
// -----// IR Dump After ResolveDevicePromisesPass (iree-hal-resolve-device-promises) //----- //
// (no changes to the module)
// -----// IR Dump After ResolveDeviceAliasesPass (iree-hal-resolve-device-aliases) //----- //
// (no changes to the module)
// -----// IR Dump After VerifyDevicesPass (iree-hal-verify-devices) //----- //
// (no changes to the module)
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} {
  %c0 = arith.constant 0 : index
  %c10240 = arith.constant 10240 : index
  %c7208960 = arith.constant 7208960 : index
  %c11264 = arith.constant 11264 : index
  %c1280 = arith.constant 1280 : index
  %c2816 = arith.constant 2816 : index
  %c2 = arith.constant 2 : index
  %element_type_f16 = hal.element_type<f16> : i32
  %dense_row_major = hal.encoding_type<dense_row_major> : i32
  hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major)
  %0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264}
  hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major)
  %1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960}
  %result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c10240} => !stream.timepoint
  %2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}, %result as %arg4: !stream.resource<external>{%c10240}) {
    stream.cmd.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 {
      ro %arg2[%c0 for %c11264] : !stream.resource<external>{%c11264},
      ro %arg3[%c0 for %c7208960] : !stream.resource<external>{%c7208960},
      wo %arg4[%c0 for %c10240] : !stream.resource<external>{%c10240}
    }
  } => !stream.timepoint
  %3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c10240}
  %4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view
  util.return %4 : !hal.buffer_view
}
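// The hal.buffer_view.assert ops above are the ABI guard for the public entry
// point: before either buffer is imported into the stream dialect, its shape,
// element type, and dense row-major encoding are checked against the declaration.
// A rough Python analogue of that check (a sketch under assumed names, not the
// IREE runtime API):

import numpy as np

def assert_buffer_view(name, array, shape, dtype):
    # Mirrors hal.buffer_view.assert: shape, element type, encoding.
    if array.shape != tuple(shape):
        raise ValueError(f"{name}: expected shape {shape}, got {array.shape}")
    if array.dtype != dtype:
        raise ValueError(f"{name}: expected dtype {dtype}, got {array.dtype}")
    if not array.flags["C_CONTIGUOUS"]:
        raise ValueError(f"{name}: expected dense row-major layout")

assert_buffer_view("input0", np.zeros((2, 2816), np.float16), (2, 2816), np.float16)
assert_buffer_view("input1", np.zeros((2816, 1280), np.float16), (2816, 1280), np.float16)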
// -----// IR Dump After CSE (cse) //----- //
// (no changes to the function; identical to the Canonicalizer dump above)
// -----// IR Dump After SimplifyGlobalAccessesPass (iree-util-simplify-global-accesses) //----- //
// (no changes to the function)
// -----// IR Dump After ApplyPatternsPass (iree-util-apply-patterns) //----- //
// (no changes to the function)
// -----// IR Dump After FoldGlobalsPass (iree-util-fold-globals) //----- // | |
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_hip | |
stream.executable private @matmul_dispatch_0 { | |
stream.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
%3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%5 = tensor.empty() : tensor<2x1280xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%7 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%3, %4 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%6 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%8 = arith.extf %in : f16 to f32 | |
%9 = arith.extf %in_0 : f16 to f32 | |
%10 = arith.mulf %8, %9 : f32 | |
%11 = arith.addf %out, %10 : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<2x1280xf32> | |
iree_tensor_ext.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
} | |
} | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c10240} => !stream.timepoint | |
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}, %result as %arg4: !stream.resource<external>{%c10240}) { | |
stream.cmd.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 { | |
ro %arg2[%c0 for %c11264] : !stream.resource<external>{%c11264}, | |
ro %arg3[%c0 for %c7208960] : !stream.resource<external>{%c7208960}, | |
wo %arg4[%c0 for %c10240] : !stream.resource<external>{%c10240} | |
} | |
} => !stream.timepoint | |
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c10240} | |
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After FuseGlobalsPass (iree-util-fuse-globals) //----- // | |
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_hip | |
stream.executable private @matmul_dispatch_0 { | |
stream.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
%3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%5 = tensor.empty() : tensor<2x1280xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%7 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%3, %4 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%6 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%8 = arith.extf %in : f16 to f32 | |
%9 = arith.extf %in_0 : f16 to f32 | |
%10 = arith.mulf %8, %9 : f32 | |
%11 = arith.addf %out, %10 : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<2x1280xf32> | |
iree_tensor_ext.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
} | |
} | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c10240} => !stream.timepoint | |
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}, %result as %arg4: !stream.resource<external>{%c10240}) { | |
stream.cmd.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 { | |
ro %arg2[%c0 for %c11264] : !stream.resource<external>{%c11264}, | |
ro %arg3[%c0 for %c7208960] : !stream.resource<external>{%c7208960}, | |
wo %arg4[%c0 for %c10240] : !stream.resource<external>{%c10240} | |
} | |
} => !stream.timepoint | |
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c10240} | |
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
} | |
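// note: dumps of this form come from running the compiler with per-pass IR printing enabled; a minimal
// sketch, assuming a recent iree-compile whose HIP flags match this build (flag names vary by version):
//   iree-compile matmul.mlir -o matmul.vmfb \
//     --iree-hal-target-backends=rocm --iree-hip-target=gfx942 \
//     --mlir-print-ir-after-all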
// -----// IR Dump After VerifyDevicesPass (iree-hal-verify-devices) //----- // | |
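// note: verification-only pass; the module below is identical to the previous dump, confirming the
// single @__device_0 HIP target is well-formed.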
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_hip | |
stream.executable private @matmul_dispatch_0 { | |
stream.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
%3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%5 = tensor.empty() : tensor<2x1280xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%7 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%3, %4 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%6 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%8 = arith.extf %in : f16 to f32 | |
%9 = arith.extf %in_0 : f16 to f32 | |
%10 = arith.mulf %8, %9 : f32 | |
%11 = arith.addf %out, %10 : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<2x1280xf32> | |
iree_tensor_ext.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
} | |
} | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c10240} => !stream.timepoint | |
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}, %result as %arg4: !stream.resource<external>{%c10240}) { | |
stream.cmd.dispatch @matmul_dispatch_0::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 { | |
ro %arg2[%c0 for %c11264] : !stream.resource<external>{%c11264}, | |
ro %arg3[%c0 for %c7208960] : !stream.resource<external>{%c7208960}, | |
wo %arg4[%c0 for %c10240] : !stream.resource<external>{%c10240} | |
} | |
} => !stream.timepoint | |
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c10240} | |
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After MaterializeInterfacesPass (iree-hal-materialize-interfaces) //----- // | |
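// note: first structural change in this stretch: the stream.executable becomes a hal.executable with a
// rocm_hsaco_fb variant, the !stream.binding arguments are replaced by hal.interface.binding.subspan
// ops against an explicit #pipeline_layout (two read-only storage buffers plus one write-only), and the
// dispatch site now names the variant:
// @matmul_dispatch_0::@rocm_hsaco_fb::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32.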
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#pipeline_layout = #hal.pipeline.layout<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect> | |
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_hip | |
hal.executable private @matmul_dispatch_0 { | |
hal.executable.variant public @rocm_hsaco_fb target(#executable_target_rocm_hsaco_fb) { | |
hal.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 ordinal(0) layout(#pipeline_layout) count(%arg0: !hal.device) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice | |
hal.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32() { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = hal.interface.binding.subspan layout(#pipeline_layout) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> | |
%1 = hal.interface.binding.subspan layout(#pipeline_layout) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> | |
%2 = hal.interface.binding.subspan layout(#pipeline_layout) binding(2) alignment(64) offset(%c0) flags(Indirect) : !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
%3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%5 = tensor.empty() : tensor<2x1280xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%7 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%3, %4 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%6 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%8 = arith.extf %in : f16 to f32 | |
%9 = arith.extf %in_0 : f16 to f32 | |
%10 = arith.mulf %8, %9 : f32 | |
%11 = arith.addf %out, %10 : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<2x1280xf32> | |
iree_tensor_ext.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
} | |
} | |
} | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c10240} => !stream.timepoint | |
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}, %result as %arg4: !stream.resource<external>{%c10240}) { | |
stream.cmd.dispatch @matmul_dispatch_0::@rocm_hsaco_fb::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 { | |
ro %arg2[%c0 for %c11264] : !stream.resource<external>{%c11264}, | |
ro %arg3[%c0 for %c7208960] : !stream.resource<external>{%c7208960}, | |
wo %arg4[%c0 for %c10240] : !stream.resource<external>{%c10240} | |
} | |
} => !stream.timepoint | |
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c10240} | |
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After PruneExecutablesPass (iree-hal-prune-executables) //----- // | |
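// note: nothing to prune; the only executable is still referenced by the dispatch, so the module is
// unchanged.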
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}> | |
#map = affine_map<(d0, d1, d2) -> (d0, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)> | |
#pipeline_layout = #hal.pipeline.layout<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect> | |
#device_target_hip = #hal.device.target<"hip", [#executable_target_rocm_hsaco_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_hip | |
hal.executable private @matmul_dispatch_0 { | |
hal.executable.variant public @rocm_hsaco_fb target(#executable_target_rocm_hsaco_fb) { | |
hal.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 ordinal(0) layout(#pipeline_layout) count(%arg0: !hal.device) -> (index, index, index) { | |
%x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice | |
hal.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32() { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = hal.interface.binding.subspan layout(#pipeline_layout) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> | |
%1 = hal.interface.binding.subspan layout(#pipeline_layout) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> | |
%2 = hal.interface.binding.subspan layout(#pipeline_layout) binding(2) alignment(64) offset(%c0) flags(Indirect) : !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
%3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%5 = tensor.empty() : tensor<2x1280xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%7 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "reduction"]} ins(%3, %4 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%6 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%8 = arith.extf %in : f16 to f32 | |
%9 = arith.extf %in_0 : f16 to f32 | |
%10 = arith.mulf %8, %9 : f32 | |
%11 = arith.addf %out, %10 : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<2x1280xf32> | |
iree_tensor_ext.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
} | |
} | |
} | |
util.func public @matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul(%input0: tensor<2x2816xf16>, %input1: tensor<2816x1280xf16>) -> (%output0: tensor<2x1280xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c10240 = arith.constant 10240 : index | |
%c7208960 = arith.constant 7208960 : index | |
%c11264 = arith.constant 11264 : index | |
%c1280 = arith.constant 1280 : index | |
%c2816 = arith.constant 2816 : index | |
%c2 = arith.constant 2 : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c2, %c2816]) type(%element_type_f16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<2x2816xf16> in !stream.resource<external>{%c11264} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c2816, %c1280]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<2816x1280xf16> in !stream.resource<external>{%c7208960} | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c10240} => !stream.timepoint | |
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c11264}, %1 as %arg3: !stream.resource<external>{%c7208960}, %result as %arg4: !stream.resource<external>{%c10240}) { | |
stream.cmd.dispatch @matmul_dispatch_0::@rocm_hsaco_fb::@matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 { | |
ro %arg2[%c0 for %c11264] : !stream.resource<external>{%c11264}, | |
ro %arg3[%c0 for %c7208960] : !stream.resource<external>{%c7208960}, | |
wo %arg4[%c0 for %c10240] : !stream.resource<external>{%c10240} | |
} | |
} => !stream.timepoint | |
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c10240} | |
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<2x1280xf32> in !stream.resource<external>{%c10240} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After MaterializeDeviceEncodingPass (iree-codegen-materialize-device-encoding) //----- // | |
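// note: from here on only the dispatch function is dumped, since codegen passes run per executable
// variant; no encoding attributes are present on these tensors, so the body is unchanged (the #map
// aliases are merely printed inline now).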
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32() { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> | |
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> | |
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
%3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%5 = tensor.empty() : tensor<2x1280xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%7 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%3, %4 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%6 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%8 = arith.extf %in : f16 to f32 | |
%9 = arith.extf %in_0 : f16 to f32 | |
%10 = arith.mulf %8, %9 : f32 | |
%11 = arith.addf %out, %10 : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<2x1280xf32> | |
iree_tensor_ext.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
// -----// IR Dump After MaterializeEncodingIntoPaddingPass (iree-codegen-materialize-encoding-into-padding) //----- // | |
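// note: likewise no encodings to lower into padding; the function is unchanged.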
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32() { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> | |
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> | |
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
%3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%5 = tensor.empty() : tensor<2x1280xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%7 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%3, %4 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%6 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%8 = arith.extf %in : f16 to f32 | |
%9 = arith.extf %in_0 : f16 to f32 | |
%10 = arith.mulf %8, %9 : f32 | |
%11 = arith.addf %out, %10 : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<2x1280xf32> | |
iree_tensor_ext.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
// -----// IR Dump After BufferizeCopyOnlyDispatchesPass (iree-codegen-bufferize-copy-only-dispatches) //----- // | |
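// note: this dispatch is a matmul, not a pure copy, so the copy-only bufferization leaves it alone.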
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32() { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> | |
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> | |
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
%3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%5 = tensor.empty() : tensor<2x1280xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%7 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%3, %4 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%6 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%8 = arith.extf %in : f16 to f32 | |
%9 = arith.extf %in_0 : f16 to f32 | |
%10 = arith.mulf %8, %9 : f32 | |
%11 = arith.addf %out, %10 : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<2x1280xf32> | |
iree_tensor_ext.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
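// note: no canonicalization patterns fire on this already-minimal function; unchanged.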
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32() { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> | |
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> | |
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
%3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%5 = tensor.empty() : tensor<2x1280xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%7 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%3, %4 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%6 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%8 = arith.extf %in : f16 to f32 | |
%9 = arith.extf %in_0 : f16 to f32 | |
%10 = arith.mulf %8, %9 : f32 | |
%11 = arith.addf %out, %10 : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<2x1280xf32> | |
iree_tensor_ext.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
// -----// IR Dump After GPUGeneralizeNamedOpsPass (iree-codegen-gpu-generalize-named-ops) //----- // | |
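// note: the matmul is already expressed as a linalg.generic rather than a named op, so there is
// nothing to generalize.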
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32() { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> | |
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> | |
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
%3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%5 = tensor.empty() : tensor<2x1280xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%7 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%3, %4 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%6 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%8 = arith.extf %in : f16 to f32 | |
%9 = arith.extf %in_0 : f16 to f32 | |
%10 = arith.mulf %8, %9 : f32 | |
%11 = arith.addf %out, %10 : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<2x1280xf32> | |
iree_tensor_ext.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
// -----// IR Dump After ROCDLConfigureBufferInstructionsPass (iree-rocdl-configure-buffer-instructions) //----- // | |
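// note: the only change is the {iree_gpu.use_rocdl_buffer_instructions} unit attribute added to all
// three binding subspans, marking these accesses as eligible for AMDGPU buffer load/store lowering.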
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32() { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") {iree_gpu.use_rocdl_buffer_instructions} : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> | |
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") {iree_gpu.use_rocdl_buffer_instructions} : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> | |
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) {iree_gpu.use_rocdl_buffer_instructions} : !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
%3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%5 = tensor.empty() : tensor<2x1280xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%7 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%3, %4 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%6 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%8 = arith.extf %in : f16 to f32 | |
%9 = arith.extf %in_0 : f16 to f32 | |
%10 = arith.mulf %8, %9 : f32 | |
%11 = arith.addf %out, %10 : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<2x1280xf32> | |
iree_tensor_ext.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
// -----// IR Dump After TypePropagationPass (iree-codegen-type-propagation) //----- // | |
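// note: no element types here need legalization (f16/f32 are covered by the target's b16/b32 storage
// support), so the function is unchanged.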
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32() { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") {iree_gpu.use_rocdl_buffer_instructions} : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> | |
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") {iree_gpu.use_rocdl_buffer_instructions} : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> | |
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) {iree_gpu.use_rocdl_buffer_instructions} : !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
%3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%5 = tensor.empty() : tensor<2x1280xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%7 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%3, %4 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%6 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%8 = arith.extf %in : f16 to f32 | |
%9 = arith.extf %in_0 : f16 to f32 | |
%10 = arith.mulf %8, %9 : f32 | |
%11 = arith.addf %out, %10 : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<2x1280xf32> | |
iree_tensor_ext.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
// -----// IR Dump After BubbleUpOrdinalOpsPass (iree-codegen-bubble-up-ordinal-ops) //----- // | |
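// note: the dispatch is fully static and carries no workload-ordinal ops to bubble up; unchanged.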
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32() { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") {iree_gpu.use_rocdl_buffer_instructions} : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> | |
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") {iree_gpu.use_rocdl_buffer_instructions} : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> | |
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) {iree_gpu.use_rocdl_buffer_instructions} : !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
%3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%5 = tensor.empty() : tensor<2x1280xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%7 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%3, %4 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%6 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%8 = arith.extf %in : f16 to f32 | |
%9 = arith.extf %in_0 : f16 to f32 | |
%10 = arith.mulf %8, %9 : f32 | |
%11 = arith.addf %out, %10 : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<2x1280xf32> | |
iree_tensor_ext.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
// -----// IR Dump After BufferizeCopyOnlyDispatchesPass (iree-codegen-bufferize-copy-only-dispatches) //----- // | |
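// note: second run of the copy-only bufferization at this phase; still not a copy-only dispatch, still
// unchanged.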
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32() { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") {iree_gpu.use_rocdl_buffer_instructions} : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> | |
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") {iree_gpu.use_rocdl_buffer_instructions} : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> | |
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) {iree_gpu.use_rocdl_buffer_instructions} : !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
%3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%5 = tensor.empty() : tensor<2x1280xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%7 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%3, %4 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%6 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%8 = arith.extf %in : f16 to f32 | |
%9 = arith.extf %in_0 : f16 to f32 | |
%10 = arith.mulf %8, %9 : f32 | |
%11 = arith.addf %out, %10 : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<2x1280xf32> | |
iree_tensor_ext.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
// -----// IR Dump After DecomposeSoftmaxPass (iree-codegen-decompose-softmax) //----- // | |
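// note: no linalg.softmax ops to decompose; unchanged.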
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32() { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") {iree_gpu.use_rocdl_buffer_instructions} : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> | |
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") {iree_gpu.use_rocdl_buffer_instructions} : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> | |
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) {iree_gpu.use_rocdl_buffer_instructions} : !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
%3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%5 = tensor.empty() : tensor<2x1280xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%7 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%3, %4 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%6 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%8 = arith.extf %in : f16 to f32 | |
%9 = arith.extf %in_0 : f16 to f32 | |
%10 = arith.mulf %8, %9 : f32 | |
%11 = arith.addf %out, %10 : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<2x1280xf32> | |
iree_tensor_ext.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
// -----// IR Dump After BlockDynamicDimensionsPass (iree-codegen-block-dynamic-dimensions) //----- // | |
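// note: every dimension (2, 1280, 2816) is static, so there are no dynamic dimensions to block;
// unchanged.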
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32() { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") {iree_gpu.use_rocdl_buffer_instructions} : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> | |
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") {iree_gpu.use_rocdl_buffer_instructions} : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> | |
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) {iree_gpu.use_rocdl_buffer_instructions} : !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
%3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%5 = tensor.empty() : tensor<2x1280xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%7 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%3, %4 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%6 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%8 = arith.extf %in : f16 to f32 | |
%9 = arith.extf %in_0 : f16 to f32 | |
%10 = arith.mulf %8, %9 : f32 | |
%11 = arith.addf %out, %10 : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<2x1280xf32> | |
iree_tensor_ext.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
// -----// IR Dump After ConfigTrackingCanonicalizerPass (iree-codegen-config-tracking-canonicalize) //----- // | |
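// note: config-tracking variant of canonicalization; no lowering config is attached yet and no
// patterns fire, so the function is unchanged.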
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32() { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") {iree_gpu.use_rocdl_buffer_instructions} : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> | |
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") {iree_gpu.use_rocdl_buffer_instructions} : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> | |
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) {iree_gpu.use_rocdl_buffer_instructions} : !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
%3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%5 = tensor.empty() : tensor<2x1280xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%7 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%3, %4 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%6 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%8 = arith.extf %in : f16 to f32 | |
%9 = arith.extf %in_0 : f16 to f32 | |
%10 = arith.mulf %8, %9 : f32 | |
%11 = arith.addf %out, %10 : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<2x1280xf32> | |
iree_tensor_ext.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
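// note: no duplicate subexpressions to eliminate; unchanged.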
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32() { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") {iree_gpu.use_rocdl_buffer_instructions} : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> | |
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") {iree_gpu.use_rocdl_buffer_instructions} : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> | |
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) {iree_gpu.use_rocdl_buffer_instructions} : !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
%3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%5 = tensor.empty() : tensor<2x1280xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%7 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%3, %4 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%6 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%8 = arith.extf %in : f16 to f32 | |
%9 = arith.extf %in_0 : f16 to f32 | |
%10 = arith.mulf %8, %9 : f32 | |
%11 = arith.addf %out, %10 : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<2x1280xf32> | |
iree_tensor_ext.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
// -----// IR Dump After MaterializeTuningSpecsPass (iree-codegen-materialize-tuning-specs) //----- // | |
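// note: no tuning spec was supplied, so nothing is materialized; the dump is simply printed at the
// enclosing module scope, with the function body unchanged.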
module { | |
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32() { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") {iree_gpu.use_rocdl_buffer_instructions} : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> | |
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") {iree_gpu.use_rocdl_buffer_instructions} : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> | |
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) {iree_gpu.use_rocdl_buffer_instructions} : !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
%3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%5 = tensor.empty() : tensor<2x1280xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<2x1280xf32>) -> tensor<2x1280xf32> | |
%7 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%3, %4 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%6 : tensor<2x1280xf32>) { | |
^bb0(%in: f16, %in_0: f16, %out: f32): | |
%8 = arith.extf %in : f16 to f32 | |
%9 = arith.extf %in_0 : f16 to f32 | |
%10 = arith.mulf %8, %9 : f32 | |
%11 = arith.addf %out, %10 : f32 | |
linalg.yield %11 : f32 | |
} -> tensor<2x1280xf32> | |
iree_tensor_ext.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
} | |
// -----// IR Dump After MaterializeUserConfigsPass (iree-codegen-materialize-user-configs) //----- //
// (no change: function identical to the previous dump)
// -----// IR Dump After LLVMGPUSelectLoweringStrategyPass (iree-llvmgpu-select-lowering-strategy) //----- //
module {
  func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32() attributes {translation_info = #iree_codegen.translation_info<pipeline = LLVMGPUVectorDistribute workgroup_size = [704, 1, 1] subgroup_size = 64, {gpu_pipeline_options = #iree_gpu.pipeline_options<prefetch_shared_memory = false, no_reduce_shared_memory_bank_conflicts = false, use_igemm_convolution = false>}>} {
    %cst = arith.constant 0.000000e+00 : f32
    %c0 = arith.constant 0 : index
    %0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") {iree_gpu.use_rocdl_buffer_instructions} : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>>
    %1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") {iree_gpu.use_rocdl_buffer_instructions} : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>>
    %2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) {iree_gpu.use_rocdl_buffer_instructions} : !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>
    %3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16>
    %4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16>
    %5 = tensor.empty() : tensor<2x1280xf32>
    %6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
    %7 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%3, %4 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%6 : tensor<2x1280xf32>) attrs = {lowering_config = #iree_gpu.lowering_config<{partial_reduction = [0, 0, 2816], subgroup_basis = [[1, 1, 11], [0, 1, 2]], thread = [0, 0, 4], thread_basis = [[1, 1, 64], [0, 1, 2]], workgroup = [1, 1, 0]}>} {
    ^bb0(%in: f16, %in_0: f16, %out: f32):
      %8 = arith.extf %in : f16 to f32
      %9 = arith.extf %in_0 : f16 to f32
      %10 = arith.mulf %8, %9 : f32
      %11 = arith.addf %out, %10 : f32
      linalg.yield %11 : f32
    } -> tensor<2x1280xf32>
    iree_tensor_ext.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>
    return
  }
}
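
Editorial note, not compiler output: the strategy selected above is arithmetically self-consistent. A minimal Python sketch (all numbers copied from the translation_info and lowering_config attributes; variable names are illustrative) checks that 11 subgroups of 64 lanes, each lane reducing 4 elements, exactly cover the K = 2816 extent that partial_reduction assigns to one workgroup:

# Sanity check of the LLVMGPUVectorDistribute schedule chosen above.
subgroup_size = 64           # subgroup_size = 64
subgroups_along_k = 11       # subgroup_basis = [[1, 1, 11], [0, 1, 2]]
elements_per_thread = 4      # thread = [0, 0, 4]

threads_per_workgroup = subgroups_along_k * subgroup_size
assert threads_per_workgroup == 704          # workgroup_size = [704, 1, 1]

k_per_workgroup = threads_per_workgroup * elements_per_thread
assert k_per_workgroup == 2816               # partial_reduction = [0, 0, 2816]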
// -----// IR Dump After ConfigureTargetExecutableVariantsPass (iree-hal-configure-target-executable-variants) //----- //
hal.executable.variant public @rocm_hsaco_fb target(<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx942", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>], subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 16384>>, ukernels = "none"}>) {
  hal.executable.export public @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32 ordinal(0) layout(#hal.pipeline.layout<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) count(%arg0: !hal.device) -> (index, index, index) {
    %x, %y, %z = iree_tensor_ext.dispatch.workgroup_count_from_slice
    hal.return %x, %y, %z : index, index, index
  }
  builtin.module {
    func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32() attributes {translation_info = #iree_codegen.translation_info<pipeline = LLVMGPUVectorDistribute workgroup_size = [704, 1, 1] subgroup_size = 64, {gpu_pipeline_options = #iree_gpu.pipeline_options<prefetch_shared_memory = false, no_reduce_shared_memory_bank_conflicts = false, use_igemm_convolution = false>}>} {
      %cst = arith.constant 0.000000e+00 : f32
      %c0 = arith.constant 0 : index
      %0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") {iree_gpu.use_rocdl_buffer_instructions} : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>>
      %1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") {iree_gpu.use_rocdl_buffer_instructions} : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>>
      %2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) {iree_gpu.use_rocdl_buffer_instructions} : !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>
      %3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16>
      %4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16>
      %5 = tensor.empty() : tensor<2x1280xf32>
      %6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<2x1280xf32>) -> tensor<2x1280xf32>
      %7 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%3, %4 : tensor<2x2816xf16>, tensor<2816x1280xf16>) outs(%6 : tensor<2x1280xf32>) attrs = {lowering_config = #iree_gpu.lowering_config<{partial_reduction = [0, 0, 2816], subgroup_basis = [[1, 1, 11], [0, 1, 2]], thread = [0, 0, 4], thread_basis = [[1, 1, 64], [0, 1, 2]], workgroup = [1, 1, 0]}>} {
      ^bb0(%in: f16, %in_0: f16, %out: f32):
        %8 = arith.extf %in : f16 to f32
        %9 = arith.extf %in_0 : f16 to f32
        %10 = arith.mulf %8, %9 : f32
        %11 = arith.addf %out, %10 : f32
        linalg.yield %11 : f32
      } -> tensor<2x1280xf32>
      iree_tensor_ext.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>
      return
    }
  }
}
// -----// IR Dump After ConfigureExecutablesPass (iree-hal-configure-executables) //----- //
hal.executable private @matmul_dispatch_0 {
  // (hal.executable.variant @rocm_hsaco_fb unchanged: identical to the previous dump, now nested in the executable)
}
// -----// IR Dump After HoistExecutableObjectsPass (iree-hal-hoist-executable-objects) //----- //
// (no change: hal.executable.variant identical to the dump after ConfigureTargetExecutableVariantsPass above)
// -----// IR Dump After LowerExecutableUsingTransformDialectPass (iree-codegen-lower-executable-using-transform-dialect) //----- //
// (no change: module identical to the dump after LLVMGPUSelectLoweringStrategyPass above)
// -----// IR Dump After TileAndDistributeToWorkgroupsUsingForallOpPass (iree-codegen-tile-and-distribute-to-workgroups-using-forall-op) //----- //
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32() attributes {translation_info = #iree_codegen.translation_info<pipeline = LLVMGPUVectorDistribute workgroup_size = [704, 1, 1] subgroup_size = 64, {gpu_pipeline_options = #iree_gpu.pipeline_options<prefetch_shared_memory = false, no_reduce_shared_memory_bank_conflicts = false, use_igemm_convolution = false>}>} {
  %cst = arith.constant 0.000000e+00 : f32
  %c0 = arith.constant 0 : index
  %0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") {iree_gpu.use_rocdl_buffer_instructions} : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>>
  %1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") {iree_gpu.use_rocdl_buffer_instructions} : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>>
  %2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) {iree_gpu.use_rocdl_buffer_instructions} : !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>
  %3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16>
  %4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16>
  %5 = tensor.empty() : tensor<2x1280xf32>
  %6 = scf.forall (%arg0, %arg1) in (2, 1280) shared_outs(%arg2 = %5) -> (tensor<2x1280xf32>) {
    %extracted_slice = tensor.extract_slice %3[%arg0, 0] [1, 2816] [1, 1] : tensor<2x2816xf16> to tensor<1x2816xf16>
    %extracted_slice_0 = tensor.extract_slice %4[0, %arg1] [2816, 1] [1, 1] : tensor<2816x1280xf16> to tensor<2816x1xf16>
    %extracted_slice_1 = tensor.extract_slice %arg2[%arg0, %arg1] [1, 1] [1, 1] : tensor<2x1280xf32> to tensor<1x1xf32>
    %7 = linalg.fill ins(%cst : f32) outs(%extracted_slice_1 : tensor<1x1xf32>) -> tensor<1x1xf32>
    %8 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%extracted_slice, %extracted_slice_0 : tensor<1x2816xf16>, tensor<2816x1xf16>) outs(%7 : tensor<1x1xf32>) attrs = {lowering_config = #iree_gpu.lowering_config<{partial_reduction = [0, 0, 2816], subgroup_basis = [[1, 1, 11], [0, 1, 2]], thread = [0, 0, 4], thread_basis = [[1, 1, 64], [0, 1, 2]], workgroup = [1, 1, 0]}>} {
    ^bb0(%in: f16, %in_2: f16, %out: f32):
      %9 = arith.extf %in : f16 to f32
      %10 = arith.extf %in_2 : f16 to f32
      %11 = arith.mulf %9, %10 : f32
      %12 = arith.addf %out, %11 : f32
      linalg.yield %12 : f32
    } -> tensor<1x1xf32>
    scf.forall.in_parallel {
      tensor.parallel_insert_slice %8 into %arg2[%arg0, %arg1] [1, 1] [1, 1] : tensor<1x1xf32> into tensor<2x1280xf32>
    }
  } {mapping = [#iree_codegen.workgroup_mapping<y>, #iree_codegen.workgroup_mapping<x>]}
  iree_tensor_ext.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>
  return
}
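
Editorial note, not compiler output: after this pass each scf.forall iteration, i.e. each of the 2 x 1280 workgroups, owns a single 1x1 output tile and performs the full K = 2816 reduction for it. A small NumPy sketch of what one (arg0, arg1) iteration computes (shapes taken from the dispatch; names are illustrative):

import numpy as np

A = np.random.rand(2, 2816).astype(np.float16)     # lhs, tensor<2x2816xf16>
B = np.random.rand(2816, 1280).astype(np.float16)  # rhs, tensor<2816x1280xf16>

def workgroup_tile(i, j):
    # tensor.extract_slice of row i / column j, arith.extf f16 -> f32,
    # then the mulf/addf reduction seeded by the linalg.fill with 0.0.
    lhs = A[i, :].astype(np.float32)
    rhs = B[:, j].astype(np.float32)
    return np.dot(lhs, rhs)

reference = A.astype(np.float32) @ B.astype(np.float32)
assert np.isclose(workgroup_tile(0, 5), reference[0, 5])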
// -----// IR Dump After ConfigTrackingCanonicalizerPass (iree-codegen-config-tracking-canonicalize) //----- //
// (no change: function identical to the dump after TileAndDistributeToWorkgroupsUsingForallOpPass above)
// -----// IR Dump After CSE (cse) //----- //
// (no change: function identical to the previous dump)
// -----// IR Dump After ConvertAttentionToOnlineAttentionPass (iree-linalg-ext-convert-attention-to-online-attention) //----- //
// (no change: function identical to the previous dump)
// -----// IR Dump After ConfigTrackingCanonicalizerPass (iree-codegen-config-tracking-canonicalize) //----- //
// (no change: function identical to the previous dump)
// -----// IR Dump After CSE (cse) //----- //
// (no change: function identical to the previous dump)
// -----// IR Dump After GPUPromoteMatmulOperandsPass (iree-codegen-gpu-promote-matmul-operands) //----- //
// (no change: function identical to the previous dump)
// -----// IR Dump After GPUApplyTilingLevelPass (iree-codegen-gpu-apply-tiling-level) //----- // | |
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32() attributes {translation_info = #iree_codegen.translation_info<pipeline = LLVMGPUVectorDistribute workgroup_size = [704, 1, 1] subgroup_size = 64, {gpu_pipeline_options = #iree_gpu.pipeline_options<prefetch_shared_memory = false, no_reduce_shared_memory_bank_conflicts = false, use_igemm_convolution = false>}>} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") {iree_gpu.use_rocdl_buffer_instructions} : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> | |
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") {iree_gpu.use_rocdl_buffer_instructions} : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> | |
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) {iree_gpu.use_rocdl_buffer_instructions} : !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
%3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16> | |
%4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16> | |
%5 = tensor.empty() : tensor<2x1280xf32> | |
%6 = scf.forall (%arg0, %arg1) in (2, 1280) shared_outs(%arg2 = %5) -> (tensor<2x1280xf32>) { | |
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [1, 2816] [1, 1] : tensor<2x2816xf16> to tensor<1x2816xf16> | |
%extracted_slice_0 = tensor.extract_slice %4[0, %arg1] [2816, 1] [1, 1] : tensor<2816x1280xf16> to tensor<2816x1xf16> | |
%extracted_slice_1 = tensor.extract_slice %arg2[%arg0, %arg1] [1, 1] [1, 1] : tensor<2x1280xf32> to tensor<1x1xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%extracted_slice_1 : tensor<1x1xf32>) -> tensor<1x1xf32> | |
%8 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%extracted_slice, %extracted_slice_0 : tensor<1x2816xf16>, tensor<2816x1xf16>) outs(%7 : tensor<1x1xf32>) attrs = {lowering_config = #iree_gpu.lowering_config<{partial_reduction = [0, 0, 2816], subgroup_basis = [[1, 1, 11], [0, 1, 2]], thread = [0, 0, 4], thread_basis = [[1, 1, 64], [0, 1, 2]], workgroup = [1, 1, 0]}>} { | |
^bb0(%in: f16, %in_2: f16, %out: f32): | |
%9 = arith.extf %in : f16 to f32 | |
%10 = arith.extf %in_2 : f16 to f32 | |
%11 = arith.mulf %9, %10 : f32 | |
%12 = arith.addf %out, %11 : f32 | |
linalg.yield %12 : f32 | |
} -> tensor<1x1xf32> | |
scf.forall.in_parallel { | |
tensor.parallel_insert_slice %8 into %arg2[%arg0, %arg1] [1, 1] [1, 1] : tensor<1x1xf32> into tensor<2x1280xf32> | |
} | |
} {mapping = [#iree_codegen.workgroup_mapping<y>, #iree_codegen.workgroup_mapping<x>]} | |
iree_tensor_ext.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>> | |
return | |
} | |
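// Reading of the lowering_config above (annotation, not part of the compiler output):
//   workgroup = [1, 1, 0]            -> each workgroup computes one (M, N) output element,
//                                       giving the 2x1280 workgroup grid of the scf.forall
//   partial_reduction = [0, 0, 2816] -> the full K = 2816 reduction stays in a single
//                                       partial-reduction tile inside the workgroup
//   subgroup_basis = [[1, 1, 11], [0, 1, 2]] with subgroup_size = 64
//                                    -> 11 subgroups * 64 lanes = 704 threads, matching
//                                       workgroup_size = [704, 1, 1] in the translation_info
//   thread = [0, 0, 4]               -> 4 K-elements per thread; 704 threads * 4 = 2816 = K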
// -----// IR Dump After LoopCoalescing (affine-loop-coalescing) //----- //
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32() attributes {translation_info = #iree_codegen.translation_info<pipeline = LLVMGPUVectorDistribute workgroup_size = [704, 1, 1] subgroup_size = 64, {gpu_pipeline_options = #iree_gpu.pipeline_options<prefetch_shared_memory = false, no_reduce_shared_memory_bank_conflicts = false, use_igemm_convolution = false>}>} {
  %cst = arith.constant 0.000000e+00 : f32
  %c0 = arith.constant 0 : index
  %0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") {iree_gpu.use_rocdl_buffer_instructions} : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>>
  %1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") {iree_gpu.use_rocdl_buffer_instructions} : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>>
  %2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) {iree_gpu.use_rocdl_buffer_instructions} : !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>
  %3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16>
  %4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16>
  %5 = tensor.empty() : tensor<2x1280xf32>
  %6 = scf.forall (%arg0, %arg1) in (2, 1280) shared_outs(%arg2 = %5) -> (tensor<2x1280xf32>) {
    %extracted_slice = tensor.extract_slice %3[%arg0, 0] [1, 2816] [1, 1] : tensor<2x2816xf16> to tensor<1x2816xf16>
    %extracted_slice_0 = tensor.extract_slice %4[0, %arg1] [2816, 1] [1, 1] : tensor<2816x1280xf16> to tensor<2816x1xf16>
    %extracted_slice_1 = tensor.extract_slice %arg2[%arg0, %arg1] [1, 1] [1, 1] : tensor<2x1280xf32> to tensor<1x1xf32>
    %7 = linalg.fill ins(%cst : f32) outs(%extracted_slice_1 : tensor<1x1xf32>) -> tensor<1x1xf32>
    %8 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%extracted_slice, %extracted_slice_0 : tensor<1x2816xf16>, tensor<2816x1xf16>) outs(%7 : tensor<1x1xf32>) attrs = {lowering_config = #iree_gpu.lowering_config<{partial_reduction = [0, 0, 2816], subgroup_basis = [[1, 1, 11], [0, 1, 2]], thread = [0, 0, 4], thread_basis = [[1, 1, 64], [0, 1, 2]], workgroup = [1, 1, 0]}>} {
    ^bb0(%in: f16, %in_2: f16, %out: f32):
      %9 = arith.extf %in : f16 to f32
      %10 = arith.extf %in_2 : f16 to f32
      %11 = arith.mulf %9, %10 : f32
      %12 = arith.addf %out, %11 : f32
      linalg.yield %12 : f32
    } -> tensor<1x1xf32>
    scf.forall.in_parallel {
      tensor.parallel_insert_slice %8 into %arg2[%arg0, %arg1] [1, 1] [1, 1] : tensor<1x1xf32> into tensor<2x1280xf32>
    }
  } {mapping = [#iree_codegen.workgroup_mapping<y>, #iree_codegen.workgroup_mapping<x>]}
  iree_tensor_ext.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>
  return
}
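// Note (annotation): this dump is identical to the previous one. affine-loop-coalescing
// found nothing to rewrite, presumably because the only loop here is the workgroup-mapped
// scf.forall rather than a perfectly nested scf.for/affine.for band it could coalesce.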
// -----// IR Dump After ConfigTrackingCanonicalizerPass (iree-codegen-config-tracking-canonicalize) //----- //
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32() attributes {translation_info = #iree_codegen.translation_info<pipeline = LLVMGPUVectorDistribute workgroup_size = [704, 1, 1] subgroup_size = 64, {gpu_pipeline_options = #iree_gpu.pipeline_options<prefetch_shared_memory = false, no_reduce_shared_memory_bank_conflicts = false, use_igemm_convolution = false>}>} {
  %cst = arith.constant 0.000000e+00 : f32
  %c0 = arith.constant 0 : index
  %0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") {iree_gpu.use_rocdl_buffer_instructions} : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>>
  %1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") {iree_gpu.use_rocdl_buffer_instructions} : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>>
  %2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) {iree_gpu.use_rocdl_buffer_instructions} : !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>
  %3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16>
  %4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16>
  %5 = tensor.empty() : tensor<2x1280xf32>
  %6 = scf.forall (%arg0, %arg1) in (2, 1280) shared_outs(%arg2 = %5) -> (tensor<2x1280xf32>) {
    %extracted_slice = tensor.extract_slice %3[%arg0, 0] [1, 2816] [1, 1] : tensor<2x2816xf16> to tensor<1x2816xf16>
    %extracted_slice_0 = tensor.extract_slice %4[0, %arg1] [2816, 1] [1, 1] : tensor<2816x1280xf16> to tensor<2816x1xf16>
    %extracted_slice_1 = tensor.extract_slice %arg2[%arg0, %arg1] [1, 1] [1, 1] : tensor<2x1280xf32> to tensor<1x1xf32>
    %7 = linalg.fill ins(%cst : f32) outs(%extracted_slice_1 : tensor<1x1xf32>) -> tensor<1x1xf32>
    %8 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%extracted_slice, %extracted_slice_0 : tensor<1x2816xf16>, tensor<2816x1xf16>) outs(%7 : tensor<1x1xf32>) attrs = {lowering_config = #iree_gpu.lowering_config<{partial_reduction = [0, 0, 2816], subgroup_basis = [[1, 1, 11], [0, 1, 2]], thread = [0, 0, 4], thread_basis = [[1, 1, 64], [0, 1, 2]], workgroup = [1, 1, 0]}>} {
    ^bb0(%in: f16, %in_2: f16, %out: f32):
      %9 = arith.extf %in : f16 to f32
      %10 = arith.extf %in_2 : f16 to f32
      %11 = arith.mulf %9, %10 : f32
      %12 = arith.addf %out, %11 : f32
      linalg.yield %12 : f32
    } -> tensor<1x1xf32>
    scf.forall.in_parallel {
      tensor.parallel_insert_slice %8 into %arg2[%arg0, %arg1] [1, 1] [1, 1] : tensor<1x1xf32> into tensor<2x1280xf32>
    }
  } {mapping = [#iree_codegen.workgroup_mapping<y>, #iree_codegen.workgroup_mapping<x>]}
  iree_tensor_ext.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>
  return
}
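// Note (annotation): again identical to the previous dump; the config-tracking
// canonicalizer made no changes to this function.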
// -----// IR Dump After CSE (cse) //----- //
func.func @matmul_dispatch_0_matmul_like_2x1280x2816_f16xf16xf32() attributes {translation_info = #iree_codegen.translation_info<pipeline = LLVMGPUVectorDistribute workgroup_size = [704, 1, 1] subgroup_size = 64, {gpu_pipeline_options = #iree_gpu.pipeline_options<prefetch_shared_memory = false, no_reduce_shared_memory_bank_conflicts = false, use_igemm_convolution = false>}>} {
  %cst = arith.constant 0.000000e+00 : f32
  %c0 = arith.constant 0 : index
  %0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") {iree_gpu.use_rocdl_buffer_instructions} : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>>
  %1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") {iree_gpu.use_rocdl_buffer_instructions} : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>>
  %2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) {iree_gpu.use_rocdl_buffer_instructions} : !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>
  %3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [2, 2816], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2x2816xf16>> -> tensor<2x2816xf16>
  %4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [2816, 1280], strides = [1, 1] : !iree_tensor_ext.dispatch.tensor<readonly:tensor<2816x1280xf16>> -> tensor<2816x1280xf16>
  %5 = tensor.empty() : tensor<2x1280xf32>
  // (The gist is truncated at this point, mid-line in "%6 = "; the remainder of this dump
  //  is filled in on the assumption that CSE left the function unchanged, matching the
  //  three byte-identical dumps above, whose IR contains no redundant subexpressions.)
  %6 = scf.forall (%arg0, %arg1) in (2, 1280) shared_outs(%arg2 = %5) -> (tensor<2x1280xf32>) {
    %extracted_slice = tensor.extract_slice %3[%arg0, 0] [1, 2816] [1, 1] : tensor<2x2816xf16> to tensor<1x2816xf16>
    %extracted_slice_0 = tensor.extract_slice %4[0, %arg1] [2816, 1] [1, 1] : tensor<2816x1280xf16> to tensor<2816x1xf16>
    %extracted_slice_1 = tensor.extract_slice %arg2[%arg0, %arg1] [1, 1] [1, 1] : tensor<2x1280xf32> to tensor<1x1xf32>
    %7 = linalg.fill ins(%cst : f32) outs(%extracted_slice_1 : tensor<1x1xf32>) -> tensor<1x1xf32>
    %8 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"]} ins(%extracted_slice, %extracted_slice_0 : tensor<1x2816xf16>, tensor<2816x1xf16>) outs(%7 : tensor<1x1xf32>) attrs = {lowering_config = #iree_gpu.lowering_config<{partial_reduction = [0, 0, 2816], subgroup_basis = [[1, 1, 11], [0, 1, 2]], thread = [0, 0, 4], thread_basis = [[1, 1, 64], [0, 1, 2]], workgroup = [1, 1, 0]}>} {
    ^bb0(%in: f16, %in_2: f16, %out: f32):
      %9 = arith.extf %in : f16 to f32
      %10 = arith.extf %in_2 : f16 to f32
      %11 = arith.mulf %9, %10 : f32
      %12 = arith.addf %out, %11 : f32
      linalg.yield %12 : f32
    } -> tensor<1x1xf32>
    scf.forall.in_parallel {
      tensor.parallel_insert_slice %8 into %arg2[%arg0, %arg1] [1, 1] [1, 1] : tensor<1x1xf32> into tensor<2x1280xf32>
    }
  } {mapping = [#iree_codegen.workgroup_mapping<y>, #iree_codegen.workgroup_mapping<x>]}
  iree_tensor_ext.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [2, 1280], strides = [1, 1] : tensor<2x1280xf32> -> !iree_tensor_ext.dispatch.tensor<writeonly:tensor<2x1280xf32>>
  return
}