Skip to content

Instantly share code, notes, and snippets.

@pashu123
Created December 12, 2024 17:57
Show Gist options
  • Save pashu123/c5a4826c51b1a4a4c0cf336269dbdffe to your computer and use it in GitHub Desktop.
This file has been truncated, but you can view the full file.
// -----// IR Dump After AutoInputConversionPipelinePass (iree-auto-input-conversion) //----- //
module {
func.func @unaligned_k(%arg0: tensor<128x258xf32>, %arg1: tensor<258x256xf32>) -> tensor<128x256xf32> {
%c0 = arith.constant 0 : index
%cst = arith.constant 0.000000e+00 : f32
%0 = tensor.empty() : tensor<128x256xf32>
%1 = linalg.fill ins(%cst : f32) outs(%0 : tensor<128x256xf32>) -> tensor<128x256xf32>
%2 = linalg.matmul ins(%arg0, %arg1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%0 : tensor<128x256xf32>) -> tensor<128x256xf32>
return %2 : tensor<128x256xf32>
}
}
// -----// IR Dump After IREEImportPublicPass (iree-import-public) //----- //
module {
util.func public @unaligned_k(%arg0: tensor<128x258xf32>, %arg1: tensor<258x256xf32>) -> tensor<128x256xf32> {
%c0 = arith.constant 0 : index
%cst = arith.constant 0.000000e+00 : f32
%0 = tensor.empty() : tensor<128x256xf32>
%1 = linalg.fill ins(%cst : f32) outs(%0 : tensor<128x256xf32>) -> tensor<128x256xf32>
%2 = linalg.matmul ins(%arg0, %arg1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%0 : tensor<128x256xf32>) -> tensor<128x256xf32>
util.return %2 : tensor<128x256xf32>
}
}
// -----// IR Dump After ImportMLProgramPass (iree-import-ml-program) //----- //
module {
util.func public @unaligned_k(%arg0: tensor<128x258xf32>, %arg1: tensor<258x256xf32>) -> tensor<128x256xf32> {
%c0 = arith.constant 0 : index
%cst = arith.constant 0.000000e+00 : f32
%0 = tensor.empty() : tensor<128x256xf32>
%1 = linalg.fill ins(%cst : f32) outs(%0 : tensor<128x256xf32>) -> tensor<128x256xf32>
%2 = linalg.matmul ins(%arg0, %arg1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%0 : tensor<128x256xf32>) -> tensor<128x256xf32>
util.return %2 : tensor<128x256xf32>
}
}
// -----// IR Dump After SanitizeModuleNamesPass (iree-sanitize-module-names) //----- //
module {
util.func public @unaligned_k(%arg0: tensor<128x258xf32>, %arg1: tensor<258x256xf32>) -> tensor<128x256xf32> {
%c0 = arith.constant 0 : index
%cst = arith.constant 0.000000e+00 : f32
%0 = tensor.empty() : tensor<128x256xf32>
%1 = linalg.fill ins(%cst : f32) outs(%0 : tensor<128x256xf32>) -> tensor<128x256xf32>
%2 = linalg.matmul ins(%arg0, %arg1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%0 : tensor<128x256xf32>) -> tensor<128x256xf32>
util.return %2 : tensor<128x256xf32>
}
}
// -----// IR Dump After ConvertMeshToFlowPass (iree-convert-mesh-to-flow) //----- //
module {
util.func public @unaligned_k(%arg0: tensor<128x258xf32>, %arg1: tensor<258x256xf32>) -> tensor<128x256xf32> {
%0 = tensor.empty() : tensor<128x256xf32>
%1 = linalg.matmul ins(%arg0, %arg1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%0 : tensor<128x256xf32>) -> tensor<128x256xf32>
util.return %1 : tensor<128x256xf32>
}
}
// -----// IR Dump After DemoteF64ToF32Pass (iree-input-conversion-demote-f64-to-f32) //----- //
module {
util.func public @unaligned_k(%arg0: tensor<128x258xf32>, %arg1: tensor<258x256xf32>) -> tensor<128x256xf32> {
%0 = tensor.empty() : tensor<128x256xf32>
%1 = linalg.matmul ins(%arg0, %arg1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%0 : tensor<128x256xf32>) -> tensor<128x256xf32>
util.return %1 : tensor<128x256xf32>
}
}
// -----// IR Dump After mlir::iree_compiler::IREE::ABI::ConvertStreamableOpsPass (iree-abi-convert-streamable-ops) //----- //
module {
util.func public @unaligned_k(%arg0: tensor<128x258xf32>, %arg1: tensor<258x256xf32>) -> tensor<128x256xf32> {
%0 = tensor.empty() : tensor<128x256xf32>
%1 = linalg.matmul ins(%arg0, %arg1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%0 : tensor<128x256xf32>) -> tensor<128x256xf32>
util.return %1 : tensor<128x256xf32>
}
}
// -----// IR Dump After mlir::iree_compiler::IREE::ABI::WrapEntryPointsPass (iree-abi-wrap-entry-points) //----- //
module {
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = util.call @_unaligned_k(%0, %1) : (tensor<128x258xf32>, tensor<258x256xf32>) -> tensor<128x256xf32>
%3 = hal.tensor.export %2 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
util.func private @_unaligned_k(%arg0: tensor<128x258xf32>, %arg1: tensor<258x256xf32>) -> tensor<128x256xf32> {
%0 = tensor.empty() : tensor<128x256xf32>
%1 = linalg.matmul ins(%arg0, %arg1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%0 : tensor<128x256xf32>) -> tensor<128x256xf32>
util.return %1 : tensor<128x256xf32>
}
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func private @_unaligned_k(%arg0: tensor<128x258xf32>, %arg1: tensor<258x256xf32>) -> tensor<128x256xf32> {
%0 = tensor.empty() : tensor<128x256xf32>
%1 = linalg.matmul ins(%arg0, %arg1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%0 : tensor<128x256xf32>) -> tensor<128x256xf32>
util.return %1 : tensor<128x256xf32>
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = util.call @_unaligned_k(%0, %1) : (tensor<128x258xf32>, tensor<258x256xf32>) -> tensor<128x256xf32>
%3 = hal.tensor.export %2 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After Inliner (inline) //----- //
module {
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After SymbolDCE (symbol-dce) //----- //
module {
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After AssignTargetDevicesPass (iree-hal-assign-target-devices) //----- //
module attributes {hal.device.targets = [#hal.device.alias<"hip"> : !hal.device]} {
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After MaterializeTargetDevicesPass (iree-hal-materialize-target-devices) //----- //
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #hal.device.alias<"hip"> : !hal.device
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After ResolveDevicePromisesPass (iree-hal-resolve-device-promises) //----- //
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #hal.device.alias<"hip"> : !hal.device
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After ResolveDeviceAliasesPass (iree-hal-resolve-device-aliases) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After VerifyDevicesPass (iree-hal-verify-devices) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After OptimizeIntArithmeticPass (iree-util-optimize-int-arithmetic) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After LinalgQuantizedConvToConvPass (iree-global-opt-quantized-conv-to-conv) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After LinalgQuantizedMatmulToMatmulPass (iree-global-opt-quantized-matmul-to-matmul) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After RemoveZeroExtentTensorsPass (iree-global-opt-remove-zero-extent-tensors) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After DetachElementwiseFromNamedOpsPass (iree-global-opt-detach-elementwise-from-named-ops) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After LinalgNamedOpConversionPass (linalg-named-op-conversion) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After EraseUnusedLinalgOperandsPass (iree-global-opt-erase-unused-linalg-operands) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After ExpandTensorShapesPass (iree-global-opt-expand-tensor-shapes) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After ConvertElementwiseToLinalgPass (convert-elementwise-to-linalg) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After RaiseSpecialOpsPass (iree-global-opt-raise-special-ops) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After DecomposeConcatPass (iree-global-opt-decompose-concat) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After GeneralizeLinalgNamedOpsPass (iree-global-opt-generalize-linalg-named-ops) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After FoldUnitExtentDimsPass (iree-dispatch-creation-fold-unit-extent-dims) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After DemoteContractionInputsToBF16Pass (iree-global-opt-demote-contraction-inputs-to-bf16) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After PropagateLinalgTransposePass (iree-global-opt-propagate-linalg-transpose) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After SetEncodingPass (iree-dispatch-creation-set-encoding) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = iree_encoding.set_encoding %0 : tensor<128x258xf32> -> tensor<128x258xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], round_dims_to = array<i64: 32, 32, 32>>>
%4 = iree_encoding.set_encoding %1 : tensor<258x256xf32> -> tensor<258x256xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], round_dims_to = array<i64: 32, 32, 32>>>
%5 = iree_encoding.set_encoding %2 : tensor<128x256xf32> -> tensor<128x256xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], round_dims_to = array<i64: 32, 32, 32>>>
%6 = linalg.matmul ins(%3, %4 : tensor<128x258xf32, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], round_dims_to = array<i64: 32, 32, 32>>>, tensor<258x256xf32, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], round_dims_to = array<i64: 32, 32, 32>>>) outs(%5 : tensor<128x256xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<128x256xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], round_dims_to = array<i64: 32, 32, 32>>>
%7 = iree_encoding.unset_encoding %6 : tensor<128x256xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], round_dims_to = array<i64: 32, 32, 32>>> -> tensor<128x256xf32>
%8 = hal.tensor.export %7 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %8 : !hal.buffer_view
}
// -----// IR Dump After MaterializeEncodingIntoNopPass (iree-codegen-materialize-encoding-into-nop) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After MaterializeHomogeneousEncodingsPass (iree-global-opt-materialize-homogeneous-encodings) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After CSE (cse) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After SimplifyPackUnpackPass (iree-global-opt-simplify-pack-unpack) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After DataLayoutPropagationPass (iree-global-opt-data-layout-propagation) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After GeneralizeLinalgNamedOpsPass (iree-global-opt-generalize-linalg-named-ops) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After GlobalLoopInvariantCodeMotionPass (iree-global-opt-loop-invariant-code-motion) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After SimplifyGlobalAccessesPass (iree-util-simplify-global-accesses) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatternsPass (iree-util-apply-patterns) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After FoldGlobalsPass (iree-util-fold-globals) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After IPOPass (iree-util-ipo) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After OptimizeIntArithmeticPass (iree-util-optimize-int-arithmetic) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After HoistIntoGlobalsPass (iree-util-hoist-into-globals) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After JitGlobalsPass (iree-consteval-jit-globals) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After RaiseSpecialOpsPass (iree-global-opt-raise-special-ops) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After InjectTensorTracingPass (iree-flow-inject-tensor-tracing) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After TensorPadToTensorInsertSlicePass (iree-dispatch-creation-tensor-pad-to-tensor-insert-slice) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After SimplifyGlobalAccessesPass (iree-util-simplify-global-accesses) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatternsPass (iree-util-apply-patterns) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After FoldGlobalsPass (iree-util-fold-globals) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After FuseGlobalsPass (iree-util-fuse-globals) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After IPOPass (iree-util-ipo) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After FixedPointIteratorPass (iree-util-fixed-point-iterator) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After FusionPreprocessingPass (iree-dispatch-creation-fusion-preprocessing) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After ElementwiseOpFusionPass (iree-dispatch-creation-elementwise-op-fusion) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After BubbleUpExpandShapesPass (iree-dispatch-creation-bubble-up-expand-shapes) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After BubbleUpExtractSlicesPass (iree-dispatch-creation-bubble-up-extract-slices) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After ElementwiseOpFusionPass (iree-dispatch-creation-elementwise-op-fusion) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After SinkReshapesPass (iree-dispatch-creation-sink-reshapes) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After FuseMultiUseElementwiseProducerPass (iree-dispatch-creation-fuse-multi-use-elementwise-producer) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After SplitReductionPass (iree-dispatch-creation-split-reduction-ops) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After TransposeGenericOpsPass (iree-dispatch-creation-transpose-generic-ops) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After FormScalarDispatchesPass (iree-dispatch-creation-form-scalar-dispatches) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After FormDispatchRegionsPass (iree-dispatch-creation-form-dispatch-regions) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = flow.dispatch.region -> (tensor<128x256xf32>) {
%5 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.return %5 : tensor<128x256xf32>
}
%4 = hal.tensor.export %3 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After CloneProducersIntoDispatchRegionsPass (iree-dispatch-creation-clone-producers-into-dispatch-regions) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = flow.dispatch.region -> (tensor<128x256xf32>) {
%4 = tensor.empty() : tensor<128x256xf32>
%5 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%4 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.return %5 : tensor<128x256xf32>
}
%3 = hal.tensor.export %2 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After CollapseDimensionsPass (iree-dispatch-creation-collapse-dimensions) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = flow.dispatch.region -> (tensor<128x256xf32>) {
%4 = tensor.empty() : tensor<128x256xf32>
%5 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%4 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.return %5 : tensor<128x256xf32>
}
%3 = hal.tensor.export %2 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After ConvertDispatchRegionsToWorkgroupsPass (iree-dispatch-creation-convert-dispatch-regions-to-workgroups) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = flow.dispatch.workgroups(%0, %1) : (tensor<128x258xf32>, tensor<258x256xf32>) -> tensor<128x256xf32> =
(%arg2: !flow.dispatch.tensor<readonly:tensor<128x258xf32>>, %arg3: !flow.dispatch.tensor<readonly:tensor<258x256xf32>>, %arg4: !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>) {
%4 = flow.dispatch.tensor.load %arg2, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%5 = flow.dispatch.tensor.load %arg3, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%6 = tensor.empty() : tensor<128x256xf32>
%7 = linalg.matmul ins(%4, %5 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%6 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %7, %arg4, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
flow.return
}
%3 = hal.tensor.export %2 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After ConvertTensorToFlowPass (iree-dispatch-creation-convert-tensor-to-flow) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = flow.dispatch.workgroups(%0, %1) : (tensor<128x258xf32>, tensor<258x256xf32>) -> tensor<128x256xf32> =
(%arg2: !flow.dispatch.tensor<readonly:tensor<128x258xf32>>, %arg3: !flow.dispatch.tensor<readonly:tensor<258x256xf32>>, %arg4: !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>) {
%4 = flow.dispatch.tensor.load %arg2, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%5 = flow.dispatch.tensor.load %arg3, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%6 = tensor.empty() : tensor<128x256xf32>
%7 = linalg.matmul ins(%4, %5 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%6 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %7, %arg4, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
flow.return
}
%3 = hal.tensor.export %2 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = flow.dispatch.workgroups(%0, %1) : (tensor<128x258xf32>, tensor<258x256xf32>) -> tensor<128x256xf32> =
(%arg2: !flow.dispatch.tensor<readonly:tensor<128x258xf32>>, %arg3: !flow.dispatch.tensor<readonly:tensor<258x256xf32>>, %arg4: !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>) {
%4 = flow.dispatch.tensor.load %arg2, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%5 = flow.dispatch.tensor.load %arg3, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%6 = tensor.empty() : tensor<128x256xf32>
%7 = linalg.matmul ins(%4, %5 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%6 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %7, %arg4, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
flow.return
}
%3 = hal.tensor.export %2 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = flow.dispatch.workgroups(%0, %1) : (tensor<128x258xf32>, tensor<258x256xf32>) -> tensor<128x256xf32> =
(%arg2: !flow.dispatch.tensor<readonly:tensor<128x258xf32>>, %arg3: !flow.dispatch.tensor<readonly:tensor<258x256xf32>>, %arg4: !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>) {
%4 = flow.dispatch.tensor.load %arg2, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%5 = flow.dispatch.tensor.load %arg3, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%6 = tensor.empty() : tensor<128x256xf32>
%7 = linalg.matmul ins(%4, %5 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%6 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %7, %arg4, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
flow.return
}
%3 = hal.tensor.export %2 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After MaterializeDefaultWorkgroupCountRegionPass (iree-dispatch-creation-materialize-default-workgroup-count-region) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = flow.dispatch.workgroups(%0, %1) : (tensor<128x258xf32>, tensor<258x256xf32>) -> tensor<128x256xf32> =
(%arg2: !flow.dispatch.tensor<readonly:tensor<128x258xf32>>, %arg3: !flow.dispatch.tensor<readonly:tensor<258x256xf32>>, %arg4: !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>) {
%4 = flow.dispatch.tensor.load %arg2, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%5 = flow.dispatch.tensor.load %arg3, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%6 = tensor.empty() : tensor<128x256xf32>
%7 = linalg.matmul ins(%4, %5 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%6 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %7, %arg4, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
flow.return
} count() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
%3 = hal.tensor.export %2 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After VerifyInputLegalityPass (iree-verify-input-legality) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = flow.dispatch.workgroups(%0, %1) : (tensor<128x258xf32>, tensor<258x256xf32>) -> tensor<128x256xf32> =
(%arg2: !flow.dispatch.tensor<readonly:tensor<128x258xf32>>, %arg3: !flow.dispatch.tensor<readonly:tensor<258x256xf32>>, %arg4: !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>) {
%4 = flow.dispatch.tensor.load %arg2, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%5 = flow.dispatch.tensor.load %arg3, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%6 = tensor.empty() : tensor<128x256xf32>
%7 = linalg.matmul ins(%4, %5 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%6 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %7, %arg4, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
flow.return
} count() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
%3 = hal.tensor.export %2 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
}
// -----// IR Dump After CaptureDynamicDimsPass (iree-flow-capture-dynamic-dims) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = flow.dispatch.workgroups(%0, %1) : (tensor<128x258xf32>, tensor<258x256xf32>) -> tensor<128x256xf32> =
(%arg2: !flow.dispatch.tensor<readonly:tensor<128x258xf32>>, %arg3: !flow.dispatch.tensor<readonly:tensor<258x256xf32>>, %arg4: !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>) {
%4 = flow.dispatch.tensor.load %arg2, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%5 = flow.dispatch.tensor.load %arg3, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%6 = tensor.empty() : tensor<128x256xf32>
%7 = linalg.matmul ins(%4, %5 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%6 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %7, %arg4, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
flow.return
} count() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
%3 = hal.tensor.export %2 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = flow.dispatch.workgroups(%0, %1) : (tensor<128x258xf32>, tensor<258x256xf32>) -> tensor<128x256xf32> =
(%arg2: !flow.dispatch.tensor<readonly:tensor<128x258xf32>>, %arg3: !flow.dispatch.tensor<readonly:tensor<258x256xf32>>, %arg4: !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>) {
%4 = flow.dispatch.tensor.load %arg2, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%5 = flow.dispatch.tensor.load %arg3, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%6 = tensor.empty() : tensor<128x256xf32>
%7 = linalg.matmul ins(%4, %5 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%6 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %7, %arg4, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
flow.return
} count() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
%3 = hal.tensor.export %2 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = flow.dispatch.workgroups(%0, %1) : (tensor<128x258xf32>, tensor<258x256xf32>) -> tensor<128x256xf32> =
(%arg2: !flow.dispatch.tensor<readonly:tensor<128x258xf32>>, %arg3: !flow.dispatch.tensor<readonly:tensor<258x256xf32>>, %arg4: !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>) {
%4 = flow.dispatch.tensor.load %arg2, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%5 = flow.dispatch.tensor.load %arg3, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%6 = tensor.empty() : tensor<128x256xf32>
%7 = linalg.matmul ins(%4, %5 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%6 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %7, %arg4, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
flow.return
} count() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
%3 = hal.tensor.export %2 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After InitializeEmptyTensorsPass (iree-flow-initialize-empty-tensors) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = flow.dispatch.workgroups(%0, %1) : (tensor<128x258xf32>, tensor<258x256xf32>) -> tensor<128x256xf32> =
(%arg2: !flow.dispatch.tensor<readonly:tensor<128x258xf32>>, %arg3: !flow.dispatch.tensor<readonly:tensor<258x256xf32>>, %arg4: !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>) {
%4 = flow.dispatch.tensor.load %arg2, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%5 = flow.dispatch.tensor.load %arg3, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%6 = tensor.empty() : tensor<128x256xf32>
%7 = linalg.matmul ins(%4, %5 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%6 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %7, %arg4, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
flow.return
} count() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
%3 = hal.tensor.export %2 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After OutlineDispatchExternsPass (iree-flow-outline-dispatch-externs) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = flow.dispatch.workgroups(%0, %1) : (tensor<128x258xf32>, tensor<258x256xf32>) -> tensor<128x256xf32> =
(%arg2: !flow.dispatch.tensor<readonly:tensor<128x258xf32>>, %arg3: !flow.dispatch.tensor<readonly:tensor<258x256xf32>>, %arg4: !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>) {
%4 = flow.dispatch.tensor.load %arg2, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%5 = flow.dispatch.tensor.load %arg3, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%6 = tensor.empty() : tensor<128x256xf32>
%7 = linalg.matmul ins(%4, %5 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%6 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %7, %arg4, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
flow.return
} count() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
%3 = hal.tensor.export %2 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
}
// -----// IR Dump After OutlineDispatchRegionsPass (iree-flow-outline-dispatch-regions) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
flow.executable private @unaligned_k_dispatch_0 {
flow.executable.export public @unaligned_k_dispatch_0 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0(%arg0: !flow.dispatch.tensor<readonly:tensor<128x258xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<258x256xf32>>, %arg2: !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>) {
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %3, %arg2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = flow.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0(%0, %1) : (tensor<128x258xf32>, tensor<258x256xf32>) -> tensor<128x256xf32>
%3 = hal.tensor.export %2 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
}
// -----// IR Dump After AnnotateDispatchesPass (iree-flow-annotate-dispatches) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
flow.executable private @unaligned_k_dispatch_0 {
flow.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<128x258xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<258x256xf32>>, %arg2: !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>) {
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %3, %arg2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = flow.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%0, %1) : (tensor<128x258xf32>, tensor<258x256xf32>) -> tensor<128x256xf32>
%3 = hal.tensor.export %2 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
}
// -----// IR Dump After StripDebugOpsPass (iree-util-strip-debug-ops) //----- //
flow.executable private @unaligned_k_dispatch_0 {
flow.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<128x258xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<258x256xf32>>, %arg2: !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>) {
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %3, %arg2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = flow.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%0, %1) : (tensor<128x258xf32>, tensor<258x256xf32>) -> tensor<128x256xf32>
%3 = hal.tensor.export %2 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After DeduplicateExecutablesPass (iree-flow-deduplicate-executables) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
flow.executable private @unaligned_k_dispatch_0 {
flow.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<128x258xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<258x256xf32>>, %arg2: !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>) {
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %3, %arg2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = flow.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%0, %1) : (tensor<128x258xf32>, tensor<258x256xf32>) -> tensor<128x256xf32>
%3 = hal.tensor.export %2 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
}
// -----// IR Dump After InjectTensorTracingPass (iree-flow-inject-tensor-tracing) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = flow.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%0, %1) : (tensor<128x258xf32>, tensor<258x256xf32>) -> tensor<128x256xf32>
%3 = hal.tensor.export %2 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After CleanupTensorShapesPass (iree-flow-cleanup-tensor-shapes) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = flow.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%0, %1) : (tensor<128x258xf32>, tensor<258x256xf32>) -> tensor<128x256xf32>
%3 = hal.tensor.export %2 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After OutlineConstantsPass (iree-flow-outline-constants) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
flow.executable private @unaligned_k_dispatch_0 {
flow.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<128x258xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<258x256xf32>>, %arg2: !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>) {
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %3, %arg2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = flow.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%0, %1) : (tensor<128x258xf32>, tensor<258x256xf32>) -> tensor<128x256xf32>
%3 = hal.tensor.export %2 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
}
// -----// IR Dump After OptimizeIntArithmeticPass (iree-util-optimize-int-arithmetic) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = flow.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%0, %1) : (tensor<128x258xf32>, tensor<258x256xf32>) -> tensor<128x256xf32>
%3 = hal.tensor.export %2 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = flow.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%0, %1) : (tensor<128x258xf32>, tensor<258x256xf32>) -> tensor<128x256xf32>
%3 = hal.tensor.export %2 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = flow.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%0, %1) : (tensor<128x258xf32>, tensor<258x256xf32>) -> tensor<128x256xf32>
%3 = hal.tensor.export %2 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After SimplifyGlobalAccessesPass (iree-util-simplify-global-accesses) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = flow.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%0, %1) : (tensor<128x258xf32>, tensor<258x256xf32>) -> tensor<128x256xf32>
%3 = hal.tensor.export %2 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatternsPass (iree-util-apply-patterns) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = flow.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%0, %1) : (tensor<128x258xf32>, tensor<258x256xf32>) -> tensor<128x256xf32>
%3 = hal.tensor.export %2 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After FoldGlobalsPass (iree-util-fold-globals) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
flow.executable private @unaligned_k_dispatch_0 {
flow.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<128x258xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<258x256xf32>>, %arg2: !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>) {
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %3, %arg2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = flow.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%0, %1) : (tensor<128x258xf32>, tensor<258x256xf32>) -> tensor<128x256xf32>
%3 = hal.tensor.export %2 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
}
// -----// IR Dump After FuseGlobalsPass (iree-util-fuse-globals) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
flow.executable private @unaligned_k_dispatch_0 {
flow.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<128x258xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<258x256xf32>>, %arg2: !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>) {
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %3, %arg2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = flow.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%0, %1) : (tensor<128x258xf32>, tensor<258x256xf32>) -> tensor<128x256xf32>
%3 = hal.tensor.export %2 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
}
// -----// IR Dump After IPOPass (iree-util-ipo) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
flow.executable private @unaligned_k_dispatch_0 {
flow.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<128x258xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<258x256xf32>>, %arg2: !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>) {
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %3, %arg2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = flow.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%0, %1) : (tensor<128x258xf32>, tensor<258x256xf32>) -> tensor<128x256xf32>
%3 = hal.tensor.export %2 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
}
// -----// IR Dump After FixedPointIteratorPass (iree-util-fixed-point-iterator) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
flow.executable private @unaligned_k_dispatch_0 {
flow.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<128x258xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<258x256xf32>>, %arg2: !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>) {
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %3, %arg2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = flow.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%0, %1) : (tensor<128x258xf32>, tensor<258x256xf32>) -> tensor<128x256xf32>
%3 = hal.tensor.export %2 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
}
// -----// IR Dump After SymbolDCE (symbol-dce) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
flow.executable private @unaligned_k_dispatch_0 {
flow.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<128x258xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<258x256xf32>>, %arg2: !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>) {
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %3, %arg2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = flow.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%0, %1) : (tensor<128x258xf32>, tensor<258x256xf32>) -> tensor<128x256xf32>
%3 = hal.tensor.export %2 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
}
// -----// IR Dump After VerifyInputPass (iree-stream-verify-input) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
flow.executable private @unaligned_k_dispatch_0 {
flow.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<128x258xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<258x256xf32>>, %arg2: !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>) {
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %3, %arg2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = flow.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%0, %1) : (tensor<128x258xf32>, tensor<258x256xf32>) -> tensor<128x256xf32>
%3 = hal.tensor.export %2 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = flow.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%0, %1) : (tensor<128x258xf32>, tensor<258x256xf32>) -> tensor<128x256xf32>
%3 = hal.tensor.export %2 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = flow.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%0, %1) : (tensor<128x258xf32>, tensor<258x256xf32>) -> tensor<128x256xf32>
%3 = hal.tensor.export %2 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After OptimizeIntArithmeticPass (iree-util-optimize-int-arithmetic) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = flow.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%0, %1) : (tensor<128x258xf32>, tensor<258x256xf32>) -> tensor<128x256xf32>
%3 = hal.tensor.export %2 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After SimplifyGlobalAccessesPass (iree-util-simplify-global-accesses) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = flow.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%0, %1) : (tensor<128x258xf32>, tensor<258x256xf32>) -> tensor<128x256xf32>
%3 = hal.tensor.export %2 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatternsPass (iree-util-apply-patterns) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = flow.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%0, %1) : (tensor<128x258xf32>, tensor<258x256xf32>) -> tensor<128x256xf32>
%3 = hal.tensor.export %2 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After FoldGlobalsPass (iree-util-fold-globals) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
flow.executable private @unaligned_k_dispatch_0 {
flow.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<128x258xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<258x256xf32>>, %arg2: !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>) {
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %3, %arg2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = flow.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%0, %1) : (tensor<128x258xf32>, tensor<258x256xf32>) -> tensor<128x256xf32>
%3 = hal.tensor.export %2 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
}
// -----// IR Dump After FuseGlobalsPass (iree-util-fuse-globals) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
flow.executable private @unaligned_k_dispatch_0 {
flow.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<128x258xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<258x256xf32>>, %arg2: !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>) {
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %3, %arg2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = flow.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%0, %1) : (tensor<128x258xf32>, tensor<258x256xf32>) -> tensor<128x256xf32>
%3 = hal.tensor.export %2 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
}
// -----// IR Dump After IPOPass (iree-util-ipo) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
flow.executable private @unaligned_k_dispatch_0 {
flow.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<128x258xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<258x256xf32>>, %arg2: !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>) {
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%2 = tensor.empty() : tensor<128x256xf32>
%3 = linalg.matmul ins(%0, %1 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%2 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %3, %arg2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<128x258xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<258x256xf32>
%2 = flow.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%0, %1) : (tensor<128x258xf32>, tensor<258x256xf32>) -> tensor<128x256xf32>
%3 = hal.tensor.export %2 "output0" : tensor<128x256xf32> -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
}
// -----// IR Dump After ConvertToStreamPass (iree-stream-conversion) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @unaligned_k_dispatch_0 {
stream.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
%c128 = arith.constant 128 : index
%c258 = arith.constant 258 : index
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<128x258xf32> : index
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%0}
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0}
%element_type_f32_0 = hal.element_type<f32> : i32
%dense_row_major_1 = hal.encoding_type<dense_row_major> : i32
%c258_2 = arith.constant 258 : index
%c256 = arith.constant 256 : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258_2, %c256]) type(%element_type_f32_0) encoding(%dense_row_major_1)
%3 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<258x256xf32> : index
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%3}
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3}
%c0 = arith.constant 0 : index
%6 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<128x256xf32> : index
%7 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%2[%c0 to %0 for %0], %5[%c0 to %3 for %3]) : (!stream.resource<*>{%0}, !stream.resource<*>{%3}) -> !stream.resource<*>{%6}
%8 = stream.async.transfer %7 : !stream.resource<*>{%6} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%6}
%9 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %8 : tensor<128x256xf32> in !stream.resource<external>{%6} -> !hal.buffer_view
util.return %9 : !hal.buffer_view
}
}
// -----// IR Dump After VerifyLoweringToTensorsPass (iree-stream-verify-lowering-to-tensors) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @unaligned_k_dispatch_0 {
stream.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
%c128 = arith.constant 128 : index
%c258 = arith.constant 258 : index
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<128x258xf32> : index
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%0}
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0}
%element_type_f32_0 = hal.element_type<f32> : i32
%dense_row_major_1 = hal.encoding_type<dense_row_major> : i32
%c258_2 = arith.constant 258 : index
%c256 = arith.constant 256 : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258_2, %c256]) type(%element_type_f32_0) encoding(%dense_row_major_1)
%3 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<258x256xf32> : index
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%3}
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3}
%c0 = arith.constant 0 : index
%6 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<128x256xf32> : index
%7 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%2[%c0 to %0 for %0], %5[%c0 to %3 for %3]) : (!stream.resource<*>{%0}, !stream.resource<*>{%3}) -> !stream.resource<*>{%6}
%8 = stream.async.transfer %7 : !stream.resource<*>{%6} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%6}
%9 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %8 : tensor<128x256xf32> in !stream.resource<external>{%6} -> !hal.buffer_view
util.return %9 : !hal.buffer_view
}
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<128x258xf32> : index
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%0}
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0}
%element_type_f32_0 = hal.element_type<f32> : i32
%dense_row_major_1 = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32_0) encoding(%dense_row_major_1)
%3 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<258x256xf32> : index
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%3}
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3}
%6 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<128x256xf32> : index
%7 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%2[%c0 to %0 for %0], %5[%c0 to %3 for %3]) : (!stream.resource<*>{%0}, !stream.resource<*>{%3}) -> !stream.resource<*>{%6}
%8 = stream.async.transfer %7 : !stream.resource<*>{%6} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%6}
%9 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %8 : tensor<128x256xf32> in !stream.resource<external>{%6} -> !hal.buffer_view
util.return %9 : !hal.buffer_view
}
// -----// IR Dump After Inliner (inline) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @unaligned_k_dispatch_0 {
stream.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<128x258xf32> : index
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%0}
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0}
%element_type_f32_0 = hal.element_type<f32> : i32
%dense_row_major_1 = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32_0) encoding(%dense_row_major_1)
%3 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<258x256xf32> : index
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%3}
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3}
%6 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<128x256xf32> : index
%7 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%2[%c0 to %0 for %0], %5[%c0 to %3 for %3]) : (!stream.resource<*>{%0}, !stream.resource<*>{%3}) -> !stream.resource<*>{%6}
%8 = stream.async.transfer %7 : !stream.resource<*>{%6} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%6}
%9 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %8 : tensor<128x256xf32> in !stream.resource<external>{%6} -> !hal.buffer_view
util.return %9 : !hal.buffer_view
}
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<128x258xf32> : index
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%0}
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0}
%element_type_f32_0 = hal.element_type<f32> : i32
%dense_row_major_1 = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32_0) encoding(%dense_row_major_1)
%3 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<258x256xf32> : index
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%3}
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3}
%6 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<128x256xf32> : index
%7 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%2[%c0 to %0 for %0], %5[%c0 to %3 for %3]) : (!stream.resource<*>{%0}, !stream.resource<*>{%3}) -> !stream.resource<*>{%6}
%8 = stream.async.transfer %7 : !stream.resource<*>{%6} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%6}
%9 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %8 : tensor<128x256xf32> in !stream.resource<external>{%6} -> !hal.buffer_view
util.return %9 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<128x258xf32> : index
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%0}
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%3 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<258x256xf32> : index
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%3}
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3}
%6 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<128x256xf32> : index
%7 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%2[%c0 to %0 for %0], %5[%c0 to %3 for %3]) : (!stream.resource<*>{%0}, !stream.resource<*>{%3}) -> !stream.resource<*>{%6}
%8 = stream.async.transfer %7 : !stream.resource<*>{%6} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%6}
%9 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %8 : tensor<128x256xf32> in !stream.resource<external>{%6} -> !hal.buffer_view
util.return %9 : !hal.buffer_view
}
// -----// IR Dump After OptimizeIntArithmeticPass (iree-util-optimize-int-arithmetic) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<128x258xf32> : index
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%0}
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%3 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<258x256xf32> : index
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%3}
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3}
%6 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<128x256xf32> : index
%7 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%2[%c0 to %0 for %0], %5[%c0 to %3 for %3]) : (!stream.resource<*>{%0}, !stream.resource<*>{%3}) -> !stream.resource<*>{%6}
%8 = stream.async.transfer %7 : !stream.resource<*>{%6} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%6}
%9 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %8 : tensor<128x256xf32> in !stream.resource<external>{%6} -> !hal.buffer_view
util.return %9 : !hal.buffer_view
}
// -----// IR Dump After SimplifyGlobalAccessesPass (iree-util-simplify-global-accesses) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<128x258xf32> : index
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%0}
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%3 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<258x256xf32> : index
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%3}
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3}
%6 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<128x256xf32> : index
%7 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%2[%c0 to %0 for %0], %5[%c0 to %3 for %3]) : (!stream.resource<*>{%0}, !stream.resource<*>{%3}) -> !stream.resource<*>{%6}
%8 = stream.async.transfer %7 : !stream.resource<*>{%6} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%6}
%9 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %8 : tensor<128x256xf32> in !stream.resource<external>{%6} -> !hal.buffer_view
util.return %9 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatternsPass (iree-util-apply-patterns) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<128x258xf32> : index
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%0}
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%3 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<258x256xf32> : index
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%3}
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3}
%6 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<128x256xf32> : index
%7 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%2[%c0 to %0 for %0], %5[%c0 to %3 for %3]) : (!stream.resource<*>{%0}, !stream.resource<*>{%3}) -> !stream.resource<*>{%6}
%8 = stream.async.transfer %7 : !stream.resource<*>{%6} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%6}
%9 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %8 : tensor<128x256xf32> in !stream.resource<external>{%6} -> !hal.buffer_view
util.return %9 : !hal.buffer_view
}
// -----// IR Dump After FoldGlobalsPass (iree-util-fold-globals) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @unaligned_k_dispatch_0 {
stream.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<128x258xf32> : index
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%0}
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%3 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<258x256xf32> : index
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%3}
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3}
%6 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<128x256xf32> : index
%7 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%2[%c0 to %0 for %0], %5[%c0 to %3 for %3]) : (!stream.resource<*>{%0}, !stream.resource<*>{%3}) -> !stream.resource<*>{%6}
%8 = stream.async.transfer %7 : !stream.resource<*>{%6} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%6}
%9 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %8 : tensor<128x256xf32> in !stream.resource<external>{%6} -> !hal.buffer_view
util.return %9 : !hal.buffer_view
}
}
// -----// IR Dump After FuseGlobalsPass (iree-util-fuse-globals) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @unaligned_k_dispatch_0 {
stream.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<128x258xf32> : index
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%0}
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%3 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<258x256xf32> : index
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%3}
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3}
%6 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<128x256xf32> : index
%7 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%2[%c0 to %0 for %0], %5[%c0 to %3 for %3]) : (!stream.resource<*>{%0}, !stream.resource<*>{%3}) -> !stream.resource<*>{%6}
%8 = stream.async.transfer %7 : !stream.resource<*>{%6} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%6}
%9 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %8 : tensor<128x256xf32> in !stream.resource<external>{%6} -> !hal.buffer_view
util.return %9 : !hal.buffer_view
}
}
// -----// IR Dump After IPOPass (iree-util-ipo) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @unaligned_k_dispatch_0 {
stream.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<128x258xf32> : index
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%0}
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%3 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<258x256xf32> : index
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%3}
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3}
%6 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<128x256xf32> : index
%7 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%2[%c0 to %0 for %0], %5[%c0 to %3 for %3]) : (!stream.resource<*>{%0}, !stream.resource<*>{%3}) -> !stream.resource<*>{%6}
%8 = stream.async.transfer %7 : !stream.resource<*>{%6} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%6}
%9 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %8 : tensor<128x256xf32> in !stream.resource<external>{%6} -> !hal.buffer_view
util.return %9 : !hal.buffer_view
}
}
// -----// IR Dump After CombineInitializersPass (iree-util-combine-initializers) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @unaligned_k_dispatch_0 {
stream.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<128x258xf32> : index
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%0}
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%3 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<258x256xf32> : index
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%3}
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3}
%6 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<128x256xf32> : index
%7 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%2[%c0 to %0 for %0], %5[%c0 to %3 for %3]) : (!stream.resource<*>{%0}, !stream.resource<*>{%3}) -> !stream.resource<*>{%6}
%8 = stream.async.transfer %7 : !stream.resource<*>{%6} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%6}
%9 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %8 : tensor<128x256xf32> in !stream.resource<external>{%6} -> !hal.buffer_view
util.return %9 : !hal.buffer_view
}
}
// -----// IR Dump After EncodeDeviceTensorsPass (iree-stream-encode-device-tensors) //----- //
stream.executable private @unaligned_k_dispatch_0 {
stream.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
// -----// IR Dump After EncodeHostTensorsPass (iree-stream-encode-host-tensors) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
%1 = stream.async.transfer %0 : !stream.resource<external>{%c132096} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%3 = stream.async.transfer %2 : !stream.resource<external>{%c264192} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c264192}
%4 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%1[%c0 to %c132096 for %c132096], %3[%c0 to %c264192 for %c264192]) : (!stream.resource<*>{%c132096}, !stream.resource<*>{%c264192}) -> !stream.resource<*>{%c131072}
%5 = stream.async.transfer %4 : !stream.resource<*>{%c131072} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c131072}
%6 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %5 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
%1 = stream.async.transfer %0 : !stream.resource<external>{%c132096} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%3 = stream.async.transfer %2 : !stream.resource<external>{%c264192} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c264192}
%4 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%1[%c0 to %c132096 for %c132096], %3[%c0 to %c264192 for %c264192]) : (!stream.resource<*>{%c132096}, !stream.resource<*>{%c264192}) -> !stream.resource<*>{%c131072}
%5 = stream.async.transfer %4 : !stream.resource<*>{%c131072} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c131072}
%6 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %5 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
%1 = stream.async.transfer %0 : !stream.resource<external>{%c132096} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%3 = stream.async.transfer %2 : !stream.resource<external>{%c264192} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c264192}
%4 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%1[%c0 to %c132096 for %c132096], %3[%c0 to %c264192 for %c264192]) : (!stream.resource<*>{%c132096}, !stream.resource<*>{%c264192}) -> !stream.resource<*>{%c131072}
%5 = stream.async.transfer %4 : !stream.resource<*>{%c131072} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c131072}
%6 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %5 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
// -----// IR Dump After OptimizeIntArithmeticPass (iree-util-optimize-int-arithmetic) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
%1 = stream.async.transfer %0 : !stream.resource<external>{%c132096} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%3 = stream.async.transfer %2 : !stream.resource<external>{%c264192} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c264192}
%4 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%1[%c0 to %c132096 for %c132096], %3[%c0 to %c264192 for %c264192]) : (!stream.resource<*>{%c132096}, !stream.resource<*>{%c264192}) -> !stream.resource<*>{%c131072}
%5 = stream.async.transfer %4 : !stream.resource<*>{%c131072} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c131072}
%6 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %5 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
// -----// IR Dump After SimplifyGlobalAccessesPass (iree-util-simplify-global-accesses) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
%1 = stream.async.transfer %0 : !stream.resource<external>{%c132096} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%3 = stream.async.transfer %2 : !stream.resource<external>{%c264192} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c264192}
%4 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%1[%c0 to %c132096 for %c132096], %3[%c0 to %c264192 for %c264192]) : (!stream.resource<*>{%c132096}, !stream.resource<*>{%c264192}) -> !stream.resource<*>{%c131072}
%5 = stream.async.transfer %4 : !stream.resource<*>{%c131072} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c131072}
%6 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %5 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatternsPass (iree-util-apply-patterns) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
%1 = stream.async.transfer %0 : !stream.resource<external>{%c132096} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%3 = stream.async.transfer %2 : !stream.resource<external>{%c264192} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c264192}
%4 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%1[%c0 to %c132096 for %c132096], %3[%c0 to %c264192 for %c264192]) : (!stream.resource<*>{%c132096}, !stream.resource<*>{%c264192}) -> !stream.resource<*>{%c131072}
%5 = stream.async.transfer %4 : !stream.resource<*>{%c131072} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c131072}
%6 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %5 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
// -----// IR Dump After FoldGlobalsPass (iree-util-fold-globals) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @unaligned_k_dispatch_0 {
stream.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
%1 = stream.async.transfer %0 : !stream.resource<external>{%c132096} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%3 = stream.async.transfer %2 : !stream.resource<external>{%c264192} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c264192}
%4 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%1[%c0 to %c132096 for %c132096], %3[%c0 to %c264192 for %c264192]) : (!stream.resource<*>{%c132096}, !stream.resource<*>{%c264192}) -> !stream.resource<*>{%c131072}
%5 = stream.async.transfer %4 : !stream.resource<*>{%c131072} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c131072}
%6 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %5 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
}
// -----// IR Dump After FuseGlobalsPass (iree-util-fuse-globals) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @unaligned_k_dispatch_0 {
stream.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
%1 = stream.async.transfer %0 : !stream.resource<external>{%c132096} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%3 = stream.async.transfer %2 : !stream.resource<external>{%c264192} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c264192}
%4 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%1[%c0 to %c132096 for %c132096], %3[%c0 to %c264192 for %c264192]) : (!stream.resource<*>{%c132096}, !stream.resource<*>{%c264192}) -> !stream.resource<*>{%c131072}
%5 = stream.async.transfer %4 : !stream.resource<*>{%c131072} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c131072}
%6 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %5 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
}
// -----// IR Dump After IPOPass (iree-util-ipo) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @unaligned_k_dispatch_0 {
stream.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
%1 = stream.async.transfer %0 : !stream.resource<external>{%c132096} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%3 = stream.async.transfer %2 : !stream.resource<external>{%c264192} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c264192}
%4 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%1[%c0 to %c132096 for %c132096], %3[%c0 to %c264192 for %c264192]) : (!stream.resource<*>{%c132096}, !stream.resource<*>{%c264192}) -> !stream.resource<*>{%c131072}
%5 = stream.async.transfer %4 : !stream.resource<*>{%c131072} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c131072}
%6 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %5 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
}
// -----// IR Dump After VerifyLoweringToAsyncResourcesPass (iree-stream-verify-lowering-to-async-resources) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @unaligned_k_dispatch_0 {
stream.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
%1 = stream.async.transfer %0 : !stream.resource<external>{%c132096} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%3 = stream.async.transfer %2 : !stream.resource<external>{%c264192} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c264192}
%4 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%1[%c0 to %c132096 for %c132096], %3[%c0 to %c264192 for %c264192]) : (!stream.resource<*>{%c132096}, !stream.resource<*>{%c264192}) -> !stream.resource<*>{%c131072}
%5 = stream.async.transfer %4 : !stream.resource<*>{%c131072} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c131072}
%6 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %5 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
}
// -----// IR Dump After MaterializeCopyOnWritePass (iree-stream-materialize-copy-on-write) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
%1 = stream.async.transfer %0 : !stream.resource<external>{%c132096} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%3 = stream.async.transfer %2 : !stream.resource<external>{%c264192} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c264192}
%4 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%1[%c0 to %c132096 for %c132096], %3[%c0 to %c264192 for %c264192]) : (!stream.resource<*>{%c132096}, !stream.resource<*>{%c264192}) -> !stream.resource<*>{%c131072}
%5 = stream.async.transfer %4 : !stream.resource<*>{%c131072} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c131072}
%6 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %5 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
%1 = stream.async.transfer %0 : !stream.resource<external>{%c132096} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%3 = stream.async.transfer %2 : !stream.resource<external>{%c264192} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c264192}
%4 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%1[%c0 to %c132096 for %c132096], %3[%c0 to %c264192 for %c264192]) : (!stream.resource<*>{%c132096}, !stream.resource<*>{%c264192}) -> !stream.resource<*>{%c131072}
%5 = stream.async.transfer %4 : !stream.resource<*>{%c131072} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c131072}
%6 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %5 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
// -----// IR Dump After ElideAsyncCopiesPass (iree-stream-elide-async-copies) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @unaligned_k_dispatch_0 {
stream.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
%1 = stream.async.transfer %0 : !stream.resource<external>{%c132096} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%3 = stream.async.transfer %2 : !stream.resource<external>{%c264192} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c264192}
%4 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%1[%c0 to %c132096 for %c132096], %3[%c0 to %c264192 for %c264192]) : (!stream.resource<*>{%c132096}, !stream.resource<*>{%c264192}) -> !stream.resource<*>{%c131072}
%5 = stream.async.transfer %4 : !stream.resource<*>{%c131072} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c131072}
%6 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %5 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
%1 = stream.async.transfer %0 : !stream.resource<external>{%c132096} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%3 = stream.async.transfer %2 : !stream.resource<external>{%c264192} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c264192}
%4 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%1[%c0 to %c132096 for %c132096], %3[%c0 to %c264192 for %c264192]) : (!stream.resource<*>{%c132096}, !stream.resource<*>{%c264192}) -> !stream.resource<*>{%c131072}
%5 = stream.async.transfer %4 : !stream.resource<*>{%c131072} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c131072}
%6 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %5 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
// -----// IR Dump After EmplaceAllocationsPass (iree-stream-emplace-allocations) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
%1 = stream.async.transfer %0 : !stream.resource<external>{%c132096} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%3 = stream.async.transfer %2 : !stream.resource<external>{%c264192} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c264192}
%4 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%1[%c0 to %c132096 for %c132096], %3[%c0 to %c264192 for %c264192]) : (!stream.resource<*>{%c132096}, !stream.resource<*>{%c264192}) -> !stream.resource<*>{%c131072}
%5 = stream.async.transfer %4 : !stream.resource<*>{%c131072} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c131072}
%6 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %5 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
// -----// IR Dump After RefineUsagePass (iree-stream-refine-usage) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @unaligned_k_dispatch_0 {
stream.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%2 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%0[%c0 to %c132096 for %c132096], %1[%c0 to %c264192 for %c264192]) : (!stream.resource<external>{%c132096}, !stream.resource<external>{%c264192}) -> !stream.resource<external>{%c131072}
%3 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %2 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%2 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%0[%c0 to %c132096 for %c132096], %1[%c0 to %c264192 for %c264192]) : (!stream.resource<external>{%c132096}, !stream.resource<external>{%c264192}) -> !stream.resource<external>{%c131072}
%3 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %2 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%2 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%0[%c0 to %c132096 for %c132096], %1[%c0 to %c264192 for %c264192]) : (!stream.resource<external>{%c132096}, !stream.resource<external>{%c264192}) -> !stream.resource<external>{%c131072}
%3 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %2 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After OptimizeIntArithmeticPass (iree-util-optimize-int-arithmetic) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%2 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%0[%c0 to %c132096 for %c132096], %1[%c0 to %c264192 for %c264192]) : (!stream.resource<external>{%c132096}, !stream.resource<external>{%c264192}) -> !stream.resource<external>{%c131072}
%3 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %2 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After SimplifyGlobalAccessesPass (iree-util-simplify-global-accesses) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%2 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%0[%c0 to %c132096 for %c132096], %1[%c0 to %c264192 for %c264192]) : (!stream.resource<external>{%c132096}, !stream.resource<external>{%c264192}) -> !stream.resource<external>{%c131072}
%3 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %2 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatternsPass (iree-util-apply-patterns) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%2 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%0[%c0 to %c132096 for %c132096], %1[%c0 to %c264192 for %c264192]) : (!stream.resource<external>{%c132096}, !stream.resource<external>{%c264192}) -> !stream.resource<external>{%c131072}
%3 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %2 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After FoldGlobalsPass (iree-util-fold-globals) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @unaligned_k_dispatch_0 {
stream.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%2 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%0[%c0 to %c132096 for %c132096], %1[%c0 to %c264192 for %c264192]) : (!stream.resource<external>{%c132096}, !stream.resource<external>{%c264192}) -> !stream.resource<external>{%c131072}
%3 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %2 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
}
// -----// IR Dump After FuseGlobalsPass (iree-util-fuse-globals) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @unaligned_k_dispatch_0 {
stream.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%2 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%0[%c0 to %c132096 for %c132096], %1[%c0 to %c264192 for %c264192]) : (!stream.resource<external>{%c132096}, !stream.resource<external>{%c264192}) -> !stream.resource<external>{%c131072}
%3 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %2 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
}
// -----// IR Dump After IPOPass (iree-util-ipo) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @unaligned_k_dispatch_0 {
stream.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%2 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%0[%c0 to %c132096 for %c132096], %1[%c0 to %c264192 for %c264192]) : (!stream.resource<external>{%c132096}, !stream.resource<external>{%c264192}) -> !stream.resource<external>{%c131072}
%3 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %2 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
}
// -----// IR Dump After VerifyAsyncAccessRangesPass (iree-stream-verify-async-access-ranges) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @unaligned_k_dispatch_0 {
stream.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%2 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%0[%c0 to %c132096 for %c132096], %1[%c0 to %c264192 for %c264192]) : (!stream.resource<external>{%c132096}, !stream.resource<external>{%c264192}) -> !stream.resource<external>{%c131072}
%3 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %2 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
}
// -----// IR Dump After ScheduleExecutionPass (iree-stream-schedule-execution) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}) -> !stream.resource<external>{%c131072} {
%4 = stream.async.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg2[%c0 to %c132096 for %c132096], %arg3[%c0 to %c264192 for %c264192]) : (!stream.resource<external>{%c132096}, !stream.resource<external>{%c264192}) -> !stream.resource<external>{%c131072}
stream.yield %4 : !stream.resource<external>{%c131072}
} => !stream.timepoint
%2 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%c131072}
%3 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %2 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After ScheduleConcurrencyPass (iree-stream-schedule-concurrency) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}) -> !stream.resource<external>{%c131072} {
%4 = stream.async.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg2[%c0 to %c132096 for %c132096], %arg3[%c0 to %c264192 for %c264192]) : (!stream.resource<external>{%c132096}, !stream.resource<external>{%c264192}) -> !stream.resource<external>{%c131072}
stream.yield %4 : !stream.resource<external>{%c131072}
} => !stream.timepoint
%2 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%c131072}
%3 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %2 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After PropagateTimepointsPass (iree-stream-propagate-timepoints) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @unaligned_k_dispatch_0 {
stream.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%2 = stream.timepoint.immediate => !stream.timepoint
%3 = stream.timepoint.immediate => !stream.timepoint
%4 = stream.timepoint.join max(%2, %3) => !stream.timepoint
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) await(%4) => with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}) -> !stream.resource<external>{%c131072} {
%7 = stream.async.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg2[%c0 to %c132096 for %c132096], %arg3[%c0 to %c264192 for %c264192]) : (!stream.resource<external>{%c132096}, !stream.resource<external>{%c264192}) -> !stream.resource<external>{%c131072}
stream.yield %7 : !stream.resource<external>{%c131072}
} => !stream.timepoint
%5 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%c131072}
%6 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %5 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
}
// -----// IR Dump After MaterializeBuiltinsPass (iree-stream-materialize-builtins) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @unaligned_k_dispatch_0 {
stream.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%2 = stream.timepoint.immediate => !stream.timepoint
%3 = stream.timepoint.immediate => !stream.timepoint
%4 = stream.timepoint.join max(%2, %3) => !stream.timepoint
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) await(%4) => with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}) -> !stream.resource<external>{%c131072} {
%7 = stream.async.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg2[%c0 to %c132096 for %c132096], %arg3[%c0 to %c264192 for %c264192]) : (!stream.resource<external>{%c132096}, !stream.resource<external>{%c264192}) -> !stream.resource<external>{%c131072}
stream.yield %7 : !stream.resource<external>{%c131072}
} => !stream.timepoint
%5 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%c131072}
%6 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %5 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}) -> !stream.resource<external>{%c131072} {
%4 = stream.async.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg2[%c0 to %c132096 for %c132096], %arg3[%c0 to %c264192 for %c264192]) : (!stream.resource<external>{%c132096}, !stream.resource<external>{%c264192}) -> !stream.resource<external>{%c131072}
stream.yield %4 : !stream.resource<external>{%c131072}
} => !stream.timepoint
%2 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%c131072}
%3 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %2 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}) -> !stream.resource<external>{%c131072} {
%4 = stream.async.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg2[%c0 to %c132096 for %c132096], %arg3[%c0 to %c264192 for %c264192]) : (!stream.resource<external>{%c132096}, !stream.resource<external>{%c264192}) -> !stream.resource<external>{%c131072}
stream.yield %4 : !stream.resource<external>{%c131072}
} => !stream.timepoint
%2 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%c131072}
%3 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %2 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After OptimizeIntArithmeticPass (iree-util-optimize-int-arithmetic) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}) -> !stream.resource<external>{%c131072} {
%4 = stream.async.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg2[%c0 to %c132096 for %c132096], %arg3[%c0 to %c264192 for %c264192]) : (!stream.resource<external>{%c132096}, !stream.resource<external>{%c264192}) -> !stream.resource<external>{%c131072}
stream.yield %4 : !stream.resource<external>{%c131072}
} => !stream.timepoint
%2 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%c131072}
%3 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %2 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After SimplifyGlobalAccessesPass (iree-util-simplify-global-accesses) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}) -> !stream.resource<external>{%c131072} {
%4 = stream.async.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg2[%c0 to %c132096 for %c132096], %arg3[%c0 to %c264192 for %c264192]) : (!stream.resource<external>{%c132096}, !stream.resource<external>{%c264192}) -> !stream.resource<external>{%c131072}
stream.yield %4 : !stream.resource<external>{%c131072}
} => !stream.timepoint
%2 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%c131072}
%3 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %2 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatternsPass (iree-util-apply-patterns) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}) -> !stream.resource<external>{%c131072} {
%4 = stream.async.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg2[%c0 to %c132096 for %c132096], %arg3[%c0 to %c264192 for %c264192]) : (!stream.resource<external>{%c132096}, !stream.resource<external>{%c264192}) -> !stream.resource<external>{%c131072}
stream.yield %4 : !stream.resource<external>{%c131072}
} => !stream.timepoint
%2 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%c131072}
%3 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %2 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After FoldGlobalsPass (iree-util-fold-globals) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @unaligned_k_dispatch_0 {
stream.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}) -> !stream.resource<external>{%c131072} {
%4 = stream.async.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg2[%c0 to %c132096 for %c132096], %arg3[%c0 to %c264192 for %c264192]) : (!stream.resource<external>{%c132096}, !stream.resource<external>{%c264192}) -> !stream.resource<external>{%c131072}
stream.yield %4 : !stream.resource<external>{%c131072}
} => !stream.timepoint
%2 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%c131072}
%3 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %2 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
}
// -----// IR Dump After FuseGlobalsPass (iree-util-fuse-globals) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @unaligned_k_dispatch_0 {
stream.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}) -> !stream.resource<external>{%c131072} {
%4 = stream.async.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg2[%c0 to %c132096 for %c132096], %arg3[%c0 to %c264192 for %c264192]) : (!stream.resource<external>{%c132096}, !stream.resource<external>{%c264192}) -> !stream.resource<external>{%c131072}
stream.yield %4 : !stream.resource<external>{%c131072}
} => !stream.timepoint
%2 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%c131072}
%3 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %2 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
}
// -----// IR Dump After IPOPass (iree-util-ipo) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @unaligned_k_dispatch_0 {
stream.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}) -> !stream.resource<external>{%c131072} {
%4 = stream.async.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg2[%c0 to %c132096 for %c132096], %arg3[%c0 to %c264192 for %c264192]) : (!stream.resource<external>{%c132096}, !stream.resource<external>{%c264192}) -> !stream.resource<external>{%c131072}
stream.yield %4 : !stream.resource<external>{%c131072}
} => !stream.timepoint
%2 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%c131072}
%3 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %2 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
}
// -----// IR Dump After VerifyLoweringToAsyncPass (iree-stream-verify-lowering-to-async) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @unaligned_k_dispatch_0 {
stream.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}) -> !stream.resource<external>{%c131072} {
%4 = stream.async.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg2[%c0 to %c132096 for %c132096], %arg3[%c0 to %c264192 for %c264192]) : (!stream.resource<external>{%c132096}, !stream.resource<external>{%c264192}) -> !stream.resource<external>{%c131072}
stream.yield %4 : !stream.resource<external>{%c131072}
} => !stream.timepoint
%2 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%c131072}
%3 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %2 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
}
// -----// IR Dump After ScheduleAllocationPass (iree-stream-schedule-allocation) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @unaligned_k_dispatch_0 {
stream.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%c0_0 = arith.constant 0 : index
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c131072} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}, %result as %arg4: !stream.resource<external>{%c131072}) {
stream.cmd.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32 {
ro %arg2[%c0 for %c132096] : !stream.resource<external>{%c132096},
ro %arg3[%c0 for %c264192] : !stream.resource<external>{%c264192},
wo %arg4[%c0_0 for %c131072] : !stream.resource<external>{%c131072}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c131072}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After PackConstantsPass (iree-stream-pack-constants) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%c0_0 = arith.constant 0 : index
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c131072} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}, %result as %arg4: !stream.resource<external>{%c131072}) {
stream.cmd.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32 {
ro %arg2[%c0 for %c132096] : !stream.resource<external>{%c132096},
ro %arg3[%c0 for %c264192] : !stream.resource<external>{%c264192},
wo %arg4[%c0_0 for %c131072] : !stream.resource<external>{%c131072}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c131072}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After LayoutSlicesPass (iree-stream-layout-slices) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%c0_0 = arith.constant 0 : index
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c131072} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}, %result as %arg4: !stream.resource<external>{%c131072}) {
stream.cmd.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32 {
ro %arg2[%c0 for %c132096] : !stream.resource<external>{%c132096},
ro %arg3[%c0 for %c264192] : !stream.resource<external>{%c264192},
wo %arg4[%c0_0 for %c131072] : !stream.resource<external>{%c131072}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c131072}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After PropagateSubrangesPass (iree-util-propagate-subranges) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @unaligned_k_dispatch_0 {
stream.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%c0_0 = arith.constant 0 : index
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c131072} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}, %result as %arg4: !stream.resource<external>{%c131072}) {
stream.cmd.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32 {
ro %arg2[%c0 for %c132096] : !stream.resource<external>{%c132096},
ro %arg3[%c0 for %c264192] : !stream.resource<external>{%c264192},
wo %arg4[%c0_0 for %c131072] : !stream.resource<external>{%c131072}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c131072}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c131072} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}, %result as %arg4: !stream.resource<external>{%c131072}) {
stream.cmd.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32 {
ro %arg2[%c0 for %c132096] : !stream.resource<external>{%c132096},
ro %arg3[%c0 for %c264192] : !stream.resource<external>{%c264192},
wo %arg4[%c0 for %c131072] : !stream.resource<external>{%c131072}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c131072}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c131072} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}, %result as %arg4: !stream.resource<external>{%c131072}) {
stream.cmd.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32 {
ro %arg2[%c0 for %c132096] : !stream.resource<external>{%c132096},
ro %arg3[%c0 for %c264192] : !stream.resource<external>{%c264192},
wo %arg4[%c0 for %c131072] : !stream.resource<external>{%c131072}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c131072}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After OptimizeIntArithmeticPass (iree-util-optimize-int-arithmetic) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c131072} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}, %result as %arg4: !stream.resource<external>{%c131072}) {
stream.cmd.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32 {
ro %arg2[%c0 for %c132096] : !stream.resource<external>{%c132096},
ro %arg3[%c0 for %c264192] : !stream.resource<external>{%c264192},
wo %arg4[%c0 for %c131072] : !stream.resource<external>{%c131072}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c131072}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After SimplifyGlobalAccessesPass (iree-util-simplify-global-accesses) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c131072} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}, %result as %arg4: !stream.resource<external>{%c131072}) {
stream.cmd.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32 {
ro %arg2[%c0 for %c132096] : !stream.resource<external>{%c132096},
ro %arg3[%c0 for %c264192] : !stream.resource<external>{%c264192},
wo %arg4[%c0 for %c131072] : !stream.resource<external>{%c131072}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c131072}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatternsPass (iree-util-apply-patterns) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c131072} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}, %result as %arg4: !stream.resource<external>{%c131072}) {
stream.cmd.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32 {
ro %arg2[%c0 for %c132096] : !stream.resource<external>{%c132096},
ro %arg3[%c0 for %c264192] : !stream.resource<external>{%c264192},
wo %arg4[%c0 for %c131072] : !stream.resource<external>{%c131072}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c131072}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After FoldGlobalsPass (iree-util-fold-globals) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @unaligned_k_dispatch_0 {
stream.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c131072} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}, %result as %arg4: !stream.resource<external>{%c131072}) {
stream.cmd.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32 {
ro %arg2[%c0 for %c132096] : !stream.resource<external>{%c132096},
ro %arg3[%c0 for %c264192] : !stream.resource<external>{%c264192},
wo %arg4[%c0 for %c131072] : !stream.resource<external>{%c131072}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c131072}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After FuseGlobalsPass (iree-util-fuse-globals) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @unaligned_k_dispatch_0 {
stream.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c131072} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}, %result as %arg4: !stream.resource<external>{%c131072}) {
stream.cmd.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32 {
ro %arg2[%c0 for %c132096] : !stream.resource<external>{%c132096},
ro %arg3[%c0 for %c264192] : !stream.resource<external>{%c264192},
wo %arg4[%c0 for %c131072] : !stream.resource<external>{%c131072}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c131072}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After IPOPass (iree-util-ipo) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @unaligned_k_dispatch_0 {
stream.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c131072} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}, %result as %arg4: !stream.resource<external>{%c131072}) {
stream.cmd.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32 {
ro %arg2[%c0 for %c132096] : !stream.resource<external>{%c132096},
ro %arg3[%c0 for %c264192] : !stream.resource<external>{%c264192},
wo %arg4[%c0 for %c131072] : !stream.resource<external>{%c131072}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c131072}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After VerifyLoweringToCmdPass (iree-stream-verify-lowering-to-cmd) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @unaligned_k_dispatch_0 {
stream.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c131072} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}, %result as %arg4: !stream.resource<external>{%c131072}) {
stream.cmd.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32 {
ro %arg2[%c0 for %c132096] : !stream.resource<external>{%c132096},
ro %arg3[%c0 for %c264192] : !stream.resource<external>{%c264192},
wo %arg4[%c0 for %c131072] : !stream.resource<external>{%c131072}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c131072}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c131072} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}, %result as %arg4: !stream.resource<external>{%c131072}) {
stream.cmd.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32 {
ro %arg2[%c0 for %c132096] : !stream.resource<external>{%c132096},
ro %arg3[%c0 for %c264192] : !stream.resource<external>{%c264192},
wo %arg4[%c0 for %c131072] : !stream.resource<external>{%c131072}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c131072}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c131072} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}, %result as %arg4: !stream.resource<external>{%c131072}) {
stream.cmd.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32 {
ro %arg2[%c0 for %c132096] : !stream.resource<external>{%c132096},
ro %arg3[%c0 for %c264192] : !stream.resource<external>{%c264192},
wo %arg4[%c0 for %c131072] : !stream.resource<external>{%c131072}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c131072}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After OptimizeIntArithmeticPass (iree-util-optimize-int-arithmetic) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c131072} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}, %result as %arg4: !stream.resource<external>{%c131072}) {
stream.cmd.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32 {
ro %arg2[%c0 for %c132096] : !stream.resource<external>{%c132096},
ro %arg3[%c0 for %c264192] : !stream.resource<external>{%c264192},
wo %arg4[%c0 for %c131072] : !stream.resource<external>{%c131072}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c131072}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After SimplifyGlobalAccessesPass (iree-util-simplify-global-accesses) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c131072} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}, %result as %arg4: !stream.resource<external>{%c131072}) {
stream.cmd.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32 {
ro %arg2[%c0 for %c132096] : !stream.resource<external>{%c132096},
ro %arg3[%c0 for %c264192] : !stream.resource<external>{%c264192},
wo %arg4[%c0 for %c131072] : !stream.resource<external>{%c131072}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c131072}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatternsPass (iree-util-apply-patterns) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c131072} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}, %result as %arg4: !stream.resource<external>{%c131072}) {
stream.cmd.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32 {
ro %arg2[%c0 for %c132096] : !stream.resource<external>{%c132096},
ro %arg3[%c0 for %c264192] : !stream.resource<external>{%c264192},
wo %arg4[%c0 for %c131072] : !stream.resource<external>{%c131072}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c131072}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After FoldGlobalsPass (iree-util-fold-globals) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @unaligned_k_dispatch_0 {
stream.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c131072} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}, %result as %arg4: !stream.resource<external>{%c131072}) {
stream.cmd.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32 {
ro %arg2[%c0 for %c132096] : !stream.resource<external>{%c132096},
ro %arg3[%c0 for %c264192] : !stream.resource<external>{%c264192},
wo %arg4[%c0 for %c131072] : !stream.resource<external>{%c131072}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c131072}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After FuseGlobalsPass (iree-util-fuse-globals) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @unaligned_k_dispatch_0 {
stream.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c131072} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}, %result as %arg4: !stream.resource<external>{%c131072}) {
stream.cmd.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32 {
ro %arg2[%c0 for %c132096] : !stream.resource<external>{%c132096},
ro %arg3[%c0 for %c264192] : !stream.resource<external>{%c264192},
wo %arg4[%c0 for %c131072] : !stream.resource<external>{%c131072}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c131072}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After IPOPass (iree-util-ipo) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @unaligned_k_dispatch_0 {
stream.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c131072} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}, %result as %arg4: !stream.resource<external>{%c131072}) {
stream.cmd.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32 {
ro %arg2[%c0 for %c132096] : !stream.resource<external>{%c132096},
ro %arg3[%c0 for %c264192] : !stream.resource<external>{%c264192},
wo %arg4[%c0 for %c131072] : !stream.resource<external>{%c131072}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c131072}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After SCFToControlFlow (convert-scf-to-cf) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c131072} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}, %result as %arg4: !stream.resource<external>{%c131072}) {
stream.cmd.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32 {
ro %arg2[%c0 for %c132096] : !stream.resource<external>{%c132096},
ro %arg3[%c0 for %c264192] : !stream.resource<external>{%c264192},
wo %arg4[%c0 for %c131072] : !stream.resource<external>{%c131072}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c131072}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c131072} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}, %result as %arg4: !stream.resource<external>{%c131072}) {
stream.cmd.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32 {
ro %arg2[%c0 for %c132096] : !stream.resource<external>{%c132096},
ro %arg3[%c0 for %c264192] : !stream.resource<external>{%c264192},
wo %arg4[%c0 for %c131072] : !stream.resource<external>{%c131072}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c131072}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c131072} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}, %result as %arg4: !stream.resource<external>{%c131072}) {
stream.cmd.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32 {
ro %arg2[%c0 for %c132096] : !stream.resource<external>{%c132096},
ro %arg3[%c0 for %c264192] : !stream.resource<external>{%c264192},
wo %arg4[%c0 for %c131072] : !stream.resource<external>{%c131072}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c131072}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After OptimizeIntArithmeticPass (iree-util-optimize-int-arithmetic) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c131072} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}, %result as %arg4: !stream.resource<external>{%c131072}) {
stream.cmd.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32 {
ro %arg2[%c0 for %c132096] : !stream.resource<external>{%c132096},
ro %arg3[%c0 for %c264192] : !stream.resource<external>{%c264192},
wo %arg4[%c0 for %c131072] : !stream.resource<external>{%c131072}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c131072}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After SimplifyGlobalAccessesPass (iree-util-simplify-global-accesses) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c131072} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}, %result as %arg4: !stream.resource<external>{%c131072}) {
stream.cmd.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32 {
ro %arg2[%c0 for %c132096] : !stream.resource<external>{%c132096},
ro %arg3[%c0 for %c264192] : !stream.resource<external>{%c264192},
wo %arg4[%c0 for %c131072] : !stream.resource<external>{%c131072}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c131072}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatternsPass (iree-util-apply-patterns) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c131072} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}, %result as %arg4: !stream.resource<external>{%c131072}) {
stream.cmd.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32 {
ro %arg2[%c0 for %c132096] : !stream.resource<external>{%c132096},
ro %arg3[%c0 for %c264192] : !stream.resource<external>{%c264192},
wo %arg4[%c0 for %c131072] : !stream.resource<external>{%c131072}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c131072}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After FoldGlobalsPass (iree-util-fold-globals) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @unaligned_k_dispatch_0 {
stream.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c131072} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}, %result as %arg4: !stream.resource<external>{%c131072}) {
stream.cmd.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32 {
ro %arg2[%c0 for %c132096] : !stream.resource<external>{%c132096},
ro %arg3[%c0 for %c264192] : !stream.resource<external>{%c264192},
wo %arg4[%c0 for %c131072] : !stream.resource<external>{%c131072}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c131072}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After FuseGlobalsPass (iree-util-fuse-globals) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @unaligned_k_dispatch_0 {
stream.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c131072} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}, %result as %arg4: !stream.resource<external>{%c131072}) {
stream.cmd.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32 {
ro %arg2[%c0 for %c132096] : !stream.resource<external>{%c132096},
ro %arg3[%c0 for %c264192] : !stream.resource<external>{%c264192},
wo %arg4[%c0 for %c131072] : !stream.resource<external>{%c131072}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c131072}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After IPOPass (iree-util-ipo) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @unaligned_k_dispatch_0 {
stream.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c131072} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}, %result as %arg4: !stream.resource<external>{%c131072}) {
stream.cmd.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32 {
ro %arg2[%c0 for %c132096] : !stream.resource<external>{%c132096},
ro %arg3[%c0 for %c264192] : !stream.resource<external>{%c264192},
wo %arg4[%c0 for %c131072] : !stream.resource<external>{%c131072}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c131072}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After ElideTimepointsPass (iree-stream-elide-timepoints) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @unaligned_k_dispatch_0 {
stream.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c131072} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}, %result as %arg4: !stream.resource<external>{%c131072}) {
stream.cmd.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32 {
ro %arg2[%c0 for %c132096] : !stream.resource<external>{%c132096},
ro %arg3[%c0 for %c264192] : !stream.resource<external>{%c264192},
wo %arg4[%c0 for %c131072] : !stream.resource<external>{%c131072}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c131072}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After FixedPointIteratorPass (iree-util-fixed-point-iterator) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @unaligned_k_dispatch_0 {
stream.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c131072} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}, %result as %arg4: !stream.resource<external>{%c131072}) {
stream.cmd.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32 {
ro %arg2[%c0 for %c132096] : !stream.resource<external>{%c132096},
ro %arg3[%c0 for %c264192] : !stream.resource<external>{%c264192},
wo %arg4[%c0 for %c131072] : !stream.resource<external>{%c131072}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c131072}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After FuseDispatchBindingsPass (iree-stream-fuse-dispatch-bindings) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @unaligned_k_dispatch_0 {
stream.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index, %arg4: index, %arg5: index) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%arg3] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = stream.binding.subspan %arg1[%arg4] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = stream.binding.subspan %arg2[%arg5] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c131072} => !stream.timepoint
%c0_0 = arith.constant 0 : index
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}, %result as %arg4: !stream.resource<external>{%c131072}) {
stream.cmd.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%c0, %c0, %c0 : index, index, index) {
ro %arg2[%c0_0 for %c132096] : !stream.resource<external>{%c132096},
ro %arg3[%c0_0 for %c264192] : !stream.resource<external>{%c264192},
wo %arg4[%c0_0 for %c131072] : !stream.resource<external>{%c131072}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c131072}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After AnnotateDispatchArgumentsPass (iree-stream-annotate-dispatch-arguments) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @unaligned_k_dispatch_0 {
stream.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}, %arg3: index {stream.values = [0 : index]}, %arg4: index {stream.values = [0 : index]}, %arg5: index {stream.values = [0 : index]}) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%arg3] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = stream.binding.subspan %arg1[%arg4] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = stream.binding.subspan %arg2[%arg5] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c131072} => !stream.timepoint
%c0_0 = arith.constant 0 : index
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}, %result as %arg4: !stream.resource<external>{%c131072}) {
stream.cmd.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%c0, %c0, %c0 : index, index, index) {
ro %arg2[%c0_0 for %c132096] : !stream.resource<external>{%c132096},
ro %arg3[%c0_0 for %c264192] : !stream.resource<external>{%c264192},
wo %arg4[%c0_0 for %c131072] : !stream.resource<external>{%c131072}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c131072}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After AnnotateDispatchAssumptionsPass (iree-stream-annotate-dispatch-assumptions) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @unaligned_k_dispatch_0 {
stream.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}, %arg3: index {stream.values = [0 : index]}, %arg4: index {stream.values = [0 : index]}, %arg5: index {stream.values = [0 : index]}) {
%0:3 = util.assume.int
%arg3<umin = 0, umax = 0>,
%arg4<umin = 0, umax = 0>,
%arg5<umin = 0, umax = 0>
: index, index, index
%c0 = arith.constant 0 : index
%1 = stream.binding.subspan %arg0[%0#0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%2 = stream.binding.subspan %arg1[%0#1] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%3 = stream.binding.subspan %arg2[%0#2] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%5 = flow.dispatch.tensor.load %2, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%6 = tensor.empty() : tensor<128x256xf32>
%7 = linalg.matmul ins(%4, %5 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%6 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %7, %3, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c131072} => !stream.timepoint
%c0_0 = arith.constant 0 : index
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}, %result as %arg4: !stream.resource<external>{%c131072}) {
stream.cmd.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%c0, %c0, %c0 : index, index, index) {
ro %arg2[%c0_0 for %c132096] : !stream.resource<external>{%c132096},
ro %arg3[%c0_0 for %c264192] : !stream.resource<external>{%c264192},
wo %arg4[%c0_0 for %c131072] : !stream.resource<external>{%c131072}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c131072}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After PackDispatchOperandsPass (iree-stream-pack-dispatch-operands) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @unaligned_k_dispatch_0 {
stream.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}, %arg3: i32, %arg4: i32, %arg5: i32, %arg6: i32, %arg7: i32, %arg8: i32) {
%0 = arith.extui %arg3 : i32 to i64
%1 = arith.extui %arg4 : i32 to i64
%c32_i64 = arith.constant 32 : i64
%2 = arith.shli %1, %c32_i64 : i64
%3 = arith.ori %0, %2 : i64
%4 = arith.index_castui %3 {stream.values = [0 : index]} : i64 to index
%5 = arith.extui %arg5 : i32 to i64
%6 = arith.extui %arg6 : i32 to i64
%c32_i64_0 = arith.constant 32 : i64
%7 = arith.shli %6, %c32_i64_0 : i64
%8 = arith.ori %5, %7 : i64
%9 = arith.index_castui %8 {stream.values = [0 : index]} : i64 to index
%10 = arith.extui %arg7 : i32 to i64
%11 = arith.extui %arg8 : i32 to i64
%c32_i64_1 = arith.constant 32 : i64
%12 = arith.shli %11, %c32_i64_1 : i64
%13 = arith.ori %10, %12 : i64
%14 = arith.index_castui %13 {stream.values = [0 : index]} : i64 to index
%15:3 = util.assume.int
%4<umin = 0, umax = 0>,
%9<umin = 0, umax = 0>,
%14<umin = 0, umax = 0>
: index, index, index
%c0 = arith.constant 0 : index
%16 = stream.binding.subspan %arg0[%15#0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%17 = stream.binding.subspan %arg1[%15#1] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%18 = stream.binding.subspan %arg2[%15#2] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%19 = flow.dispatch.tensor.load %16, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%20 = flow.dispatch.tensor.load %17, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%21 = tensor.empty() : tensor<128x256xf32>
%22 = linalg.matmul ins(%19, %20 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%21 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %22, %18, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c131072} => !stream.timepoint
%c0_0 = arith.constant 0 : index
%c0_i64 = arith.constant 0 : i64
%c0_i32 = arith.constant 0 : i32
%c32_i64 = arith.constant 32 : i64
%c0_i64_1 = arith.constant 0 : i64
%c0_i32_2 = arith.constant 0 : i32
%c0_i64_3 = arith.constant 0 : i64
%c0_i32_4 = arith.constant 0 : i32
%c32_i64_5 = arith.constant 32 : i64
%c0_i64_6 = arith.constant 0 : i64
%c0_i32_7 = arith.constant 0 : i32
%c0_i64_8 = arith.constant 0 : i64
%c0_i32_9 = arith.constant 0 : i32
%c32_i64_10 = arith.constant 32 : i64
%c0_i64_11 = arith.constant 0 : i64
%c0_i32_12 = arith.constant 0 : i32
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}, %result as %arg4: !stream.resource<external>{%c131072}) {
stream.cmd.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%c0_i32, %c0_i32_2, %c0_i32_4, %c0_i32_7, %c0_i32_9, %c0_i32_12 : i32, i32, i32, i32, i32, i32) {
ro %arg2[%c0_0 for %c132096] : !stream.resource<external>{%c132096},
ro %arg3[%c0_0 for %c264192] : !stream.resource<external>{%c264192},
wo %arg4[%c0_0 for %c131072] : !stream.resource<external>{%c131072}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c131072}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c0_i32 = arith.constant 0 : i32
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c131072} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}, %result as %arg4: !stream.resource<external>{%c131072}) {
stream.cmd.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32 : i32, i32, i32, i32, i32, i32) {
ro %arg2[%c0 for %c132096] : !stream.resource<external>{%c132096},
ro %arg3[%c0 for %c264192] : !stream.resource<external>{%c264192},
wo %arg4[%c0 for %c131072] : !stream.resource<external>{%c131072}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c131072}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c0_i32 = arith.constant 0 : i32
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c131072} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}, %result as %arg4: !stream.resource<external>{%c131072}) {
stream.cmd.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32 : i32, i32, i32, i32, i32, i32) {
ro %arg2[%c0 for %c132096] : !stream.resource<external>{%c132096},
ro %arg3[%c0 for %c264192] : !stream.resource<external>{%c264192},
wo %arg4[%c0 for %c131072] : !stream.resource<external>{%c131072}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c131072}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After OptimizeIntArithmeticPass (iree-util-optimize-int-arithmetic) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c0_i32 = arith.constant 0 : i32
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c131072} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}, %result as %arg4: !stream.resource<external>{%c131072}) {
stream.cmd.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32 : i32, i32, i32, i32, i32, i32) {
ro %arg2[%c0 for %c132096] : !stream.resource<external>{%c132096},
ro %arg3[%c0 for %c264192] : !stream.resource<external>{%c264192},
wo %arg4[%c0 for %c131072] : !stream.resource<external>{%c131072}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c131072}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After SimplifyGlobalAccessesPass (iree-util-simplify-global-accesses) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c0_i32 = arith.constant 0 : i32
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c131072} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}, %result as %arg4: !stream.resource<external>{%c131072}) {
stream.cmd.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32 : i32, i32, i32, i32, i32, i32) {
ro %arg2[%c0 for %c132096] : !stream.resource<external>{%c132096},
ro %arg3[%c0 for %c264192] : !stream.resource<external>{%c264192},
wo %arg4[%c0 for %c131072] : !stream.resource<external>{%c131072}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c131072}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatternsPass (iree-util-apply-patterns) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c0_i32 = arith.constant 0 : i32
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c131072} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}, %result as %arg4: !stream.resource<external>{%c131072}) {
stream.cmd.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32 : i32, i32, i32, i32, i32, i32) {
ro %arg2[%c0 for %c132096] : !stream.resource<external>{%c132096},
ro %arg3[%c0 for %c264192] : !stream.resource<external>{%c264192},
wo %arg4[%c0 for %c131072] : !stream.resource<external>{%c131072}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c131072}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After FoldGlobalsPass (iree-util-fold-globals) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @unaligned_k_dispatch_0 {
stream.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}, %arg3: i32, %arg4: i32, %arg5: i32, %arg6: i32, %arg7: i32, %arg8: i32) {
%c32_i64 = arith.constant 32 : i64
%0 = arith.extui %arg3 : i32 to i64
%1 = arith.extui %arg4 : i32 to i64
%2 = arith.shli %1, %c32_i64 : i64
%3 = arith.ori %0, %2 : i64
%4 = arith.index_castui %3 {stream.values = [0 : index]} : i64 to index
%5 = arith.extui %arg5 : i32 to i64
%6 = arith.extui %arg6 : i32 to i64
%7 = arith.shli %6, %c32_i64 : i64
%8 = arith.ori %5, %7 : i64
%9 = arith.index_castui %8 {stream.values = [0 : index]} : i64 to index
%10 = arith.extui %arg7 : i32 to i64
%11 = arith.extui %arg8 : i32 to i64
%12 = arith.shli %11, %c32_i64 : i64
%13 = arith.ori %10, %12 : i64
%14 = arith.index_castui %13 {stream.values = [0 : index]} : i64 to index
%15:3 = util.assume.int
%4<umin = 0, umax = 0>,
%9<umin = 0, umax = 0>,
%14<umin = 0, umax = 0>
: index, index, index
%16 = stream.binding.subspan %arg0[%15#0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%17 = stream.binding.subspan %arg1[%15#1] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%18 = stream.binding.subspan %arg2[%15#2] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%19 = flow.dispatch.tensor.load %16, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%20 = flow.dispatch.tensor.load %17, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%21 = tensor.empty() : tensor<128x256xf32>
%22 = linalg.matmul ins(%19, %20 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%21 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %22, %18, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c0_i32 = arith.constant 0 : i32
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c131072} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}, %result as %arg4: !stream.resource<external>{%c131072}) {
stream.cmd.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32 : i32, i32, i32, i32, i32, i32) {
ro %arg2[%c0 for %c132096] : !stream.resource<external>{%c132096},
ro %arg3[%c0 for %c264192] : !stream.resource<external>{%c264192},
wo %arg4[%c0 for %c131072] : !stream.resource<external>{%c131072}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c131072}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After FuseGlobalsPass (iree-util-fuse-globals) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @unaligned_k_dispatch_0 {
stream.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}, %arg3: i32, %arg4: i32, %arg5: i32, %arg6: i32, %arg7: i32, %arg8: i32) {
%c32_i64 = arith.constant 32 : i64
%0 = arith.extui %arg3 : i32 to i64
%1 = arith.extui %arg4 : i32 to i64
%2 = arith.shli %1, %c32_i64 : i64
%3 = arith.ori %0, %2 : i64
%4 = arith.index_castui %3 {stream.values = [0 : index]} : i64 to index
%5 = arith.extui %arg5 : i32 to i64
%6 = arith.extui %arg6 : i32 to i64
%7 = arith.shli %6, %c32_i64 : i64
%8 = arith.ori %5, %7 : i64
%9 = arith.index_castui %8 {stream.values = [0 : index]} : i64 to index
%10 = arith.extui %arg7 : i32 to i64
%11 = arith.extui %arg8 : i32 to i64
%12 = arith.shli %11, %c32_i64 : i64
%13 = arith.ori %10, %12 : i64
%14 = arith.index_castui %13 {stream.values = [0 : index]} : i64 to index
%15:3 = util.assume.int
%4<umin = 0, umax = 0>,
%9<umin = 0, umax = 0>,
%14<umin = 0, umax = 0>
: index, index, index
%16 = stream.binding.subspan %arg0[%15#0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%17 = stream.binding.subspan %arg1[%15#1] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%18 = stream.binding.subspan %arg2[%15#2] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%19 = flow.dispatch.tensor.load %16, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%20 = flow.dispatch.tensor.load %17, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%21 = tensor.empty() : tensor<128x256xf32>
%22 = linalg.matmul ins(%19, %20 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%21 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %22, %18, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c0_i32 = arith.constant 0 : i32
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c131072} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}, %result as %arg4: !stream.resource<external>{%c131072}) {
stream.cmd.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32 : i32, i32, i32, i32, i32, i32) {
ro %arg2[%c0 for %c132096] : !stream.resource<external>{%c132096},
ro %arg3[%c0 for %c264192] : !stream.resource<external>{%c264192},
wo %arg4[%c0 for %c131072] : !stream.resource<external>{%c131072}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c131072}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After IPOPass (iree-util-ipo) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @unaligned_k_dispatch_0 {
stream.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}, %arg3: i32, %arg4: i32, %arg5: i32, %arg6: i32, %arg7: i32, %arg8: i32) {
%c32_i64 = arith.constant 32 : i64
%0 = arith.extui %arg3 : i32 to i64
%1 = arith.extui %arg4 : i32 to i64
%2 = arith.shli %1, %c32_i64 : i64
%3 = arith.ori %0, %2 : i64
%4 = arith.index_castui %3 {stream.values = [0 : index]} : i64 to index
%5 = arith.extui %arg5 : i32 to i64
%6 = arith.extui %arg6 : i32 to i64
%7 = arith.shli %6, %c32_i64 : i64
%8 = arith.ori %5, %7 : i64
%9 = arith.index_castui %8 {stream.values = [0 : index]} : i64 to index
%10 = arith.extui %arg7 : i32 to i64
%11 = arith.extui %arg8 : i32 to i64
%12 = arith.shli %11, %c32_i64 : i64
%13 = arith.ori %10, %12 : i64
%14 = arith.index_castui %13 {stream.values = [0 : index]} : i64 to index
%15:3 = util.assume.int
%4<umin = 0, umax = 0>,
%9<umin = 0, umax = 0>,
%14<umin = 0, umax = 0>
: index, index, index
%16 = stream.binding.subspan %arg0[%15#0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%17 = stream.binding.subspan %arg1[%15#1] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%18 = stream.binding.subspan %arg2[%15#2] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%19 = flow.dispatch.tensor.load %16, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%20 = flow.dispatch.tensor.load %17, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%21 = tensor.empty() : tensor<128x256xf32>
%22 = linalg.matmul ins(%19, %20 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%21 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %22, %18, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c0_i32 = arith.constant 0 : i32
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c131072} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}, %result as %arg4: !stream.resource<external>{%c131072}) {
stream.cmd.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32(%c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32 : i32, i32, i32, i32, i32, i32) {
ro %arg2[%c0 for %c132096] : !stream.resource<external>{%c132096},
ro %arg3[%c0 for %c264192] : !stream.resource<external>{%c264192},
wo %arg4[%c0 for %c131072] : !stream.resource<external>{%c131072}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c131072}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After FoldUniformOperandsPass (iree-stream-fold-uniform-operands) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @unaligned_k_dispatch_0 {
stream.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}) {
%c0_i32 = arith.constant 0 : i32
%c32_i64 = arith.constant 32 : i64
%0 = arith.extui %c0_i32 : i32 to i64
%1 = arith.extui %c0_i32 : i32 to i64
%2 = arith.shli %1, %c32_i64 : i64
%3 = arith.ori %0, %2 : i64
%4 = arith.index_castui %3 {stream.values = [0 : index]} : i64 to index
%5 = arith.extui %c0_i32 : i32 to i64
%6 = arith.extui %c0_i32 : i32 to i64
%7 = arith.shli %6, %c32_i64 : i64
%8 = arith.ori %5, %7 : i64
%9 = arith.index_castui %8 {stream.values = [0 : index]} : i64 to index
%10 = arith.extui %c0_i32 : i32 to i64
%11 = arith.extui %c0_i32 : i32 to i64
%12 = arith.shli %11, %c32_i64 : i64
%13 = arith.ori %10, %12 : i64
%14 = arith.index_castui %13 {stream.values = [0 : index]} : i64 to index
%15:3 = util.assume.int
%4<umin = 0, umax = 0>,
%9<umin = 0, umax = 0>,
%14<umin = 0, umax = 0>
: index, index, index
%16 = stream.binding.subspan %arg0[%15#0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%17 = stream.binding.subspan %arg1[%15#1] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%18 = stream.binding.subspan %arg2[%15#2] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%19 = flow.dispatch.tensor.load %16, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%20 = flow.dispatch.tensor.load %17, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%21 = tensor.empty() : tensor<128x256xf32>
%22 = linalg.matmul ins(%19, %20 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%21 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %22, %18, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c0_i32 = arith.constant 0 : i32
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c131072} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}, %result as %arg4: !stream.resource<external>{%c131072}) {
stream.cmd.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32 {
ro %arg2[%c0 for %c132096] : !stream.resource<external>{%c132096},
ro %arg3[%c0 for %c264192] : !stream.resource<external>{%c264192},
wo %arg4[%c0 for %c131072] : !stream.resource<external>{%c131072}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c131072}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c131072} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}, %result as %arg4: !stream.resource<external>{%c131072}) {
stream.cmd.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32 {
ro %arg2[%c0 for %c132096] : !stream.resource<external>{%c132096},
ro %arg3[%c0 for %c264192] : !stream.resource<external>{%c264192},
wo %arg4[%c0 for %c131072] : !stream.resource<external>{%c131072}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c131072}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c131072} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}, %result as %arg4: !stream.resource<external>{%c131072}) {
stream.cmd.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32 {
ro %arg2[%c0 for %c132096] : !stream.resource<external>{%c132096},
ro %arg3[%c0 for %c264192] : !stream.resource<external>{%c264192},
wo %arg4[%c0 for %c131072] : !stream.resource<external>{%c131072}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c131072}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After OptimizeIntArithmeticPass (iree-util-optimize-int-arithmetic) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c131072} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}, %result as %arg4: !stream.resource<external>{%c131072}) {
stream.cmd.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32 {
ro %arg2[%c0 for %c132096] : !stream.resource<external>{%c132096},
ro %arg3[%c0 for %c264192] : !stream.resource<external>{%c264192},
wo %arg4[%c0 for %c131072] : !stream.resource<external>{%c131072}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c131072}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After SimplifyGlobalAccessesPass (iree-util-simplify-global-accesses) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c131072} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}, %result as %arg4: !stream.resource<external>{%c131072}) {
stream.cmd.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32 {
ro %arg2[%c0 for %c132096] : !stream.resource<external>{%c132096},
ro %arg3[%c0 for %c264192] : !stream.resource<external>{%c264192},
wo %arg4[%c0 for %c131072] : !stream.resource<external>{%c131072}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c131072}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatternsPass (iree-util-apply-patterns) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c131072} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}, %result as %arg4: !stream.resource<external>{%c131072}) {
stream.cmd.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32 {
ro %arg2[%c0 for %c132096] : !stream.resource<external>{%c132096},
ro %arg3[%c0 for %c264192] : !stream.resource<external>{%c264192},
wo %arg4[%c0 for %c131072] : !stream.resource<external>{%c131072}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c131072}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After FoldGlobalsPass (iree-util-fold-globals) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @unaligned_k_dispatch_0 {
stream.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c131072} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}, %result as %arg4: !stream.resource<external>{%c131072}) {
stream.cmd.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32 {
ro %arg2[%c0 for %c132096] : !stream.resource<external>{%c132096},
ro %arg3[%c0 for %c264192] : !stream.resource<external>{%c264192},
wo %arg4[%c0 for %c131072] : !stream.resource<external>{%c131072}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c131072}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After FuseGlobalsPass (iree-util-fuse-globals) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @unaligned_k_dispatch_0 {
stream.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c131072} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}, %result as %arg4: !stream.resource<external>{%c131072}) {
stream.cmd.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32 {
ro %arg2[%c0 for %c132096] : !stream.resource<external>{%c132096},
ro %arg3[%c0 for %c264192] : !stream.resource<external>{%c264192},
wo %arg4[%c0 for %c131072] : !stream.resource<external>{%c131072}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c131072}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After IPOPass (iree-util-ipo) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @unaligned_k_dispatch_0 {
stream.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c131072} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}, %result as %arg4: !stream.resource<external>{%c131072}) {
stream.cmd.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32 {
ro %arg2[%c0 for %c132096] : !stream.resource<external>{%c132096},
ro %arg3[%c0 for %c264192] : !stream.resource<external>{%c264192},
wo %arg4[%c0 for %c131072] : !stream.resource<external>{%c131072}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c131072}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After SymbolDCE (symbol-dce) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @unaligned_k_dispatch_0 {
stream.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c131072} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}, %result as %arg4: !stream.resource<external>{%c131072}) {
stream.cmd.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32 {
ro %arg2[%c0 for %c132096] : !stream.resource<external>{%c132096},
ro %arg3[%c0 for %c264192] : !stream.resource<external>{%c264192},
wo %arg4[%c0 for %c131072] : !stream.resource<external>{%c131072}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c131072}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After AssignTargetDevicesPass (iree-hal-assign-target-devices) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @unaligned_k_dispatch_0 {
stream.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c131072} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}, %result as %arg4: !stream.resource<external>{%c131072}) {
stream.cmd.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32 {
ro %arg2[%c0 for %c132096] : !stream.resource<external>{%c132096},
ro %arg3[%c0 for %c264192] : !stream.resource<external>{%c264192},
wo %arg4[%c0 for %c131072] : !stream.resource<external>{%c131072}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c131072}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After MaterializeTargetDevicesPass (iree-hal-materialize-target-devices) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @unaligned_k_dispatch_0 {
stream.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c131072} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}, %result as %arg4: !stream.resource<external>{%c131072}) {
stream.cmd.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32 {
ro %arg2[%c0 for %c132096] : !stream.resource<external>{%c132096},
ro %arg3[%c0 for %c264192] : !stream.resource<external>{%c264192},
wo %arg4[%c0 for %c131072] : !stream.resource<external>{%c131072}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c131072}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After ResolveDevicePromisesPass (iree-hal-resolve-device-promises) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @unaligned_k_dispatch_0 {
stream.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c131072} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}, %result as %arg4: !stream.resource<external>{%c131072}) {
stream.cmd.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32 {
ro %arg2[%c0 for %c132096] : !stream.resource<external>{%c132096},
ro %arg3[%c0 for %c264192] : !stream.resource<external>{%c264192},
wo %arg4[%c0 for %c131072] : !stream.resource<external>{%c131072}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c131072}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After ResolveDeviceAliasesPass (iree-hal-resolve-device-aliases) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @unaligned_k_dispatch_0 {
stream.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c131072} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}, %result as %arg4: !stream.resource<external>{%c131072}) {
stream.cmd.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32 {
ro %arg2[%c0 for %c132096] : !stream.resource<external>{%c132096},
ro %arg3[%c0 for %c264192] : !stream.resource<external>{%c264192},
wo %arg4[%c0 for %c131072] : !stream.resource<external>{%c131072}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c131072}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After VerifyDevicesPass (iree-hal-verify-devices) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @unaligned_k_dispatch_0 {
stream.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c131072} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}, %result as %arg4: !stream.resource<external>{%c131072}) {
stream.cmd.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32 {
ro %arg2[%c0 for %c132096] : !stream.resource<external>{%c132096},
ro %arg3[%c0 for %c264192] : !stream.resource<external>{%c264192},
wo %arg4[%c0 for %c131072] : !stream.resource<external>{%c131072}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c131072}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c131072} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}, %result as %arg4: !stream.resource<external>{%c131072}) {
stream.cmd.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32 {
ro %arg2[%c0 for %c132096] : !stream.resource<external>{%c132096},
ro %arg3[%c0 for %c264192] : !stream.resource<external>{%c264192},
wo %arg4[%c0 for %c131072] : !stream.resource<external>{%c131072}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c131072}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c131072} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}, %result as %arg4: !stream.resource<external>{%c131072}) {
stream.cmd.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32 {
ro %arg2[%c0 for %c132096] : !stream.resource<external>{%c132096},
ro %arg3[%c0 for %c264192] : !stream.resource<external>{%c264192},
wo %arg4[%c0 for %c131072] : !stream.resource<external>{%c131072}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c131072}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After SimplifyGlobalAccessesPass (iree-util-simplify-global-accesses) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c131072} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}, %result as %arg4: !stream.resource<external>{%c131072}) {
stream.cmd.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32 {
ro %arg2[%c0 for %c132096] : !stream.resource<external>{%c132096},
ro %arg3[%c0 for %c264192] : !stream.resource<external>{%c264192},
wo %arg4[%c0 for %c131072] : !stream.resource<external>{%c131072}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c131072}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatternsPass (iree-util-apply-patterns) //----- //
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c131072} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}, %result as %arg4: !stream.resource<external>{%c131072}) {
stream.cmd.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32 {
ro %arg2[%c0 for %c132096] : !stream.resource<external>{%c132096},
ro %arg3[%c0 for %c264192] : !stream.resource<external>{%c264192},
wo %arg4[%c0 for %c131072] : !stream.resource<external>{%c131072}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c131072}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After FoldGlobalsPass (iree-util-fold-globals) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @unaligned_k_dispatch_0 {
stream.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c131072} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}, %result as %arg4: !stream.resource<external>{%c131072}) {
stream.cmd.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32 {
ro %arg2[%c0 for %c132096] : !stream.resource<external>{%c132096},
ro %arg3[%c0 for %c264192] : !stream.resource<external>{%c264192},
wo %arg4[%c0 for %c131072] : !stream.resource<external>{%c131072}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c131072}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After FuseGlobalsPass (iree-util-fuse-globals) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @unaligned_k_dispatch_0 {
stream.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c131072} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}, %result as %arg4: !stream.resource<external>{%c131072}) {
stream.cmd.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32 {
ro %arg2[%c0 for %c132096] : !stream.resource<external>{%c132096},
ro %arg3[%c0 for %c264192] : !stream.resource<external>{%c264192},
wo %arg4[%c0 for %c131072] : !stream.resource<external>{%c131072}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c131072}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After VerifyDevicesPass (iree-hal-verify-devices) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
stream.executable private @unaligned_k_dispatch_0 {
stream.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c131072} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}, %result as %arg4: !stream.resource<external>{%c131072}) {
stream.cmd.dispatch @unaligned_k_dispatch_0::@unaligned_k_dispatch_0_matmul_128x256x258_f32 {
ro %arg2[%c0 for %c132096] : !stream.resource<external>{%c132096},
ro %arg3[%c0 for %c264192] : !stream.resource<external>{%c264192},
wo %arg4[%c0 for %c131072] : !stream.resource<external>{%c131072}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c131072}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After MaterializeInterfacesPass (iree-hal-materialize-interfaces) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#pipeline_layout = #hal.pipeline.layout<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
hal.executable private @unaligned_k_dispatch_0 {
hal.executable.variant public @rocm_hsaco_fb target(#executable_target_rocm_hsaco_fb) {
hal.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 ordinal(0) layout(#pipeline_layout) {
^bb0(%arg0: !hal.device):
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
hal.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32() {
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan layout(#pipeline_layout) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = hal.interface.binding.subspan layout(#pipeline_layout) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = hal.interface.binding.subspan layout(#pipeline_layout) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c131072} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}, %result as %arg4: !stream.resource<external>{%c131072}) {
stream.cmd.dispatch @unaligned_k_dispatch_0::@rocm_hsaco_fb::@unaligned_k_dispatch_0_matmul_128x256x258_f32 {
ro %arg2[%c0 for %c132096] : !stream.resource<external>{%c132096},
ro %arg3[%c0 for %c264192] : !stream.resource<external>{%c264192},
wo %arg4[%c0 for %c131072] : !stream.resource<external>{%c131072}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c131072}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After PruneExecutablesPass (iree-hal-prune-executables) //----- //
#executable_target_rocm_hsaco_fb = #hal.executable.target<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>
#pipeline_layout = #hal.pipeline.layout<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>
#device_target_hip = #hal.device.target<"hip", {legacy_sync}, [#executable_target_rocm_hsaco_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_hip
hal.executable private @unaligned_k_dispatch_0 {
hal.executable.variant public @rocm_hsaco_fb target(#executable_target_rocm_hsaco_fb) {
hal.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 ordinal(0) layout(#pipeline_layout) {
^bb0(%arg0: !hal.device):
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
hal.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32() {
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan layout(#pipeline_layout) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = hal.interface.binding.subspan layout(#pipeline_layout) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = hal.interface.binding.subspan layout(#pipeline_layout) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
}
util.func public @unaligned_k(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @unaligned_k(%input0: tensor<128x258xf32>, %input1: tensor<258x256xf32>) -> (%output0: tensor<128x256xf32>)"}} {
%c131072 = arith.constant 131072 : index
%c264192 = arith.constant 264192 : index
%c132096 = arith.constant 132096 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c258 = arith.constant 258 : index
%c128 = arith.constant 128 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c128, %c258]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<128x258xf32> in !stream.resource<external>{%c132096}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c258, %c256]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<258x256xf32> in !stream.resource<external>{%c264192}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c131072} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c132096}, %1 as %arg3: !stream.resource<external>{%c264192}, %result as %arg4: !stream.resource<external>{%c131072}) {
stream.cmd.dispatch @unaligned_k_dispatch_0::@rocm_hsaco_fb::@unaligned_k_dispatch_0_matmul_128x256x258_f32 {
ro %arg2[%c0 for %c132096] : !stream.resource<external>{%c132096},
ro %arg3[%c0 for %c264192] : !stream.resource<external>{%c264192},
wo %arg4[%c0 for %c131072] : !stream.resource<external>{%c131072}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c131072}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<128x256xf32> in !stream.resource<external>{%c131072} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After GPUGeneralizeNamedOpsPass (iree-codegen-gpu-generalize-named-ops) //----- //
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32() {
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
// -----// IR Dump After TypePropagationPass (iree-codegen-type-propagation) //----- //
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32() {
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
// -----// IR Dump After BubbleUpOrdinalOpsPass (iree-codegen-bubble-up-ordinal-ops) //----- //
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32() {
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
// -----// IR Dump After BufferizeCopyOnlyDispatchesPass (iree-codegen-bufferize-copy-only-dispatches) //----- //
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32() {
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
// -----// IR Dump After DecomposeSoftmaxPass (iree-codegen-decompose-softmax) //----- //
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32() {
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
// -----// IR Dump After MaterializeEncodingIntoNopPass (iree-codegen-materialize-encoding-into-nop) //----- //
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32() {
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
// -----// IR Dump After BufferizeCopyOnlyDispatchesPass (iree-codegen-bufferize-copy-only-dispatches) //----- //
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32() {
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32() {
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
// -----// IR Dump After BlockDynamicDimensionsPass (iree-codegen-block-dynamic-dimensions) //----- //
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32() {
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
// -----// IR Dump After ConfigTrackingCanonicalizerPass (iree-codegen-config-tracking-canonicalize) //----- //
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32() {
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
// -----// IR Dump After CSE (cse) //----- //
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32() {
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
// -----// IR Dump After MaterializeTuningSpecsPass (iree-codegen-materialize-tuning-specs) //----- //
module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32() {
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
// -----// IR Dump After MaterializeUserConfigsPass (iree-codegen-materialize-user-configs) //----- //
module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32() {
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
// -----// IR Dump After LLVMGPUSelectLoweringStrategyPass (iree-llvmgpu-select-lowering-strategy) //----- //
module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32() attributes {translation_info = #iree_codegen.translation_info<pipeline = LLVMGPUTileAndFuse workgroup_size = [32, 8, 1] subgroup_size = 32, {gpu_pipeline_options = #iree_gpu.pipeline_options<prefetch_shared_memory = false, no_reduce_shared_memory_bank_conflicts = true, use_igemm_convolution = false>}>} {
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul {lowering_config = #iree_gpu.lowering_config<{promote_operands = [0, 1], reduction = [0, 0, 2], thread = [1, 16, 0], workgroup = [32, 128, 1]}>} ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
// -----// IR Dump After ConfigureTargetExecutableVariantsPass (iree-hal-configure-target-executable-variants) //----- //
hal.executable.variant public @rocm_hsaco_fb target(<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>) {
hal.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 ordinal(0) layout(#hal.pipeline.layout<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) {
^bb0(%arg0: !hal.device):
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
hal.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32() attributes {translation_info = #iree_codegen.translation_info<pipeline = LLVMGPUTileAndFuse workgroup_size = [32, 8, 1] subgroup_size = 32, {gpu_pipeline_options = #iree_gpu.pipeline_options<prefetch_shared_memory = false, no_reduce_shared_memory_bank_conflicts = true, use_igemm_convolution = false>}>} {
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul {lowering_config = #iree_gpu.lowering_config<{promote_operands = [0, 1], reduction = [0, 0, 2], thread = [1, 16, 0], workgroup = [32, 128, 1]}>} ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
// -----// IR Dump After ConfigureExecutablesPass (iree-hal-configure-executables) //----- //
hal.executable private @unaligned_k_dispatch_0 {
hal.executable.variant public @rocm_hsaco_fb target(<"rocm", "rocm-hsaco-fb", {abi = "hip", iree.gpu.target = #iree_gpu.target<arch = "gfx1100", features = "", wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8, subgroup = shuffle|arithmetic, dot = dp4xi8toi32, mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>], subgroup_size_choices = [32, 64], max_workgroup_sizes = [1024, 1024, 1024], max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536, max_workgroup_counts = [2147483647, 2147483647, 2147483647], max_load_instruction_bits = 128, simds_per_wgp = 4, vgpr_space_bits = 8192>>, ukernels = "none"}>) {
hal.executable.export public @unaligned_k_dispatch_0_matmul_128x256x258_f32 ordinal(0) layout(#hal.pipeline.layout<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) {
^bb0(%arg0: !hal.device):
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
hal.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32() attributes {translation_info = #iree_codegen.translation_info<pipeline = LLVMGPUTileAndFuse workgroup_size = [32, 8, 1] subgroup_size = 32, {gpu_pipeline_options = #iree_gpu.pipeline_options<prefetch_shared_memory = false, no_reduce_shared_memory_bank_conflicts = true, use_igemm_convolution = false>}>} {
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul {lowering_config = #iree_gpu.lowering_config<{promote_operands = [0, 1], reduction = [0, 0, 2], thread = [1, 16, 0], workgroup = [32, 128, 1]}>} ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
}
}
// -----// IR Dump After LowerExecutableUsingTransformDialectPass (iree-codegen-lower-executable-using-transform-dialect) //----- //
// Snapshot after LowerExecutableUsingTransformDialect: no transform-dialect
// script applied, so the IR is unchanged from the configured form above.
module {
// 128x256x258 f32 matmul dispatch, LLVMGPUTileAndFuse pipeline (32x8x1 workgroup,
// subgroup size 32).
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32() attributes {translation_info = #iree_codegen.translation_info<pipeline = LLVMGPUTileAndFuse workgroup_size = [32, 8, 1] subgroup_size = 32, {gpu_pipeline_options = #iree_gpu.pipeline_options<prefetch_shared_memory = false, no_reduce_shared_memory_bank_conflicts = true, use_igemm_convolution = false>}>} {
%c0 = arith.constant 0 : index
// Read-only LHS/RHS bindings (0/1) and the write-only result binding (2).
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
// Uninitialized accumulator (the input program's zero-fill was dead and elided).
%5 = tensor.empty() : tensor<128x256xf32>
%6 = linalg.matmul {lowering_config = #iree_gpu.lowering_config<{promote_operands = [0, 1], reduction = [0, 0, 2], thread = [1, 16, 0], workgroup = [32, 128, 1]}>} ins(%3, %4 : tensor<128x258xf32>, tensor<258x256xf32>) outs(%5 : tensor<128x256xf32>) -> tensor<128x256xf32>
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
}
// -----// IR Dump After TileAndDistributeToWorkgroupsUsingForallOpPass (iree-codegen-tile-and-distribute-to-workgroups-using-forall-op) //----- //
// Snapshot after TileAndDistributeToWorkgroupsUsingForallOp: the matmul is now
// wrapped in an scf.forall over workgroup tiles — a 4x2 grid, (M,N) stepped by
// (32,128), mapped to workgroup dims (y, x).
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32() attributes {translation_info = #iree_codegen.translation_info<pipeline = LLVMGPUTileAndFuse workgroup_size = [32, 8, 1] subgroup_size = 32, {gpu_pipeline_options = #iree_gpu.pipeline_options<prefetch_shared_memory = false, no_reduce_shared_memory_bank_conflicts = true, use_igemm_convolution = false>}>} {
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (128, 256) step (32, 128) shared_outs(%arg2 = %5) -> (tensor<128x256xf32>) {
// Per-workgroup operand tiles: full K (258) is kept per tile; only M and N
// are distributed at this level.
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [32, 258] [1, 1] : tensor<128x258xf32> to tensor<32x258xf32>
%extracted_slice_0 = tensor.extract_slice %4[0, %arg1] [258, 128] [1, 1] : tensor<258x256xf32> to tensor<258x128xf32>
%extracted_slice_1 = tensor.extract_slice %arg2[%arg0, %arg1] [32, 128] [1, 1] : tensor<128x256xf32> to tensor<32x128xf32>
%7 = linalg.matmul {lowering_config = #iree_gpu.lowering_config<{promote_operands = [0, 1], reduction = [0, 0, 2], thread = [1, 16, 0], workgroup = [32, 128, 1]}>} ins(%extracted_slice, %extracted_slice_0 : tensor<32x258xf32>, tensor<258x128xf32>) outs(%extracted_slice_1 : tensor<32x128xf32>) -> tensor<32x128xf32>
scf.forall.in_parallel {
// Each workgroup writes back its disjoint 32x128 tile of the result.
tensor.parallel_insert_slice %7 into %arg2[%arg0, %arg1] [32, 128] [1, 1] : tensor<32x128xf32> into tensor<128x256xf32>
}
} {mapping = [#iree_codegen.workgroup_mapping<y>, #iree_codegen.workgroup_mapping<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
// -----// IR Dump After ConfigTrackingCanonicalizerPass (iree-codegen-config-tracking-canonicalize) //----- //
// Snapshot after ConfigTrackingCanonicalizer: textually identical to the
// post-tiling IR above — canonicalization found nothing to simplify.
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32() attributes {translation_info = #iree_codegen.translation_info<pipeline = LLVMGPUTileAndFuse workgroup_size = [32, 8, 1] subgroup_size = 32, {gpu_pipeline_options = #iree_gpu.pipeline_options<prefetch_shared_memory = false, no_reduce_shared_memory_bank_conflicts = true, use_igemm_convolution = false>}>} {
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (128, 256) step (32, 128) shared_outs(%arg2 = %5) -> (tensor<128x256xf32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [32, 258] [1, 1] : tensor<128x258xf32> to tensor<32x258xf32>
%extracted_slice_0 = tensor.extract_slice %4[0, %arg1] [258, 128] [1, 1] : tensor<258x256xf32> to tensor<258x128xf32>
%extracted_slice_1 = tensor.extract_slice %arg2[%arg0, %arg1] [32, 128] [1, 1] : tensor<128x256xf32> to tensor<32x128xf32>
%7 = linalg.matmul {lowering_config = #iree_gpu.lowering_config<{promote_operands = [0, 1], reduction = [0, 0, 2], thread = [1, 16, 0], workgroup = [32, 128, 1]}>} ins(%extracted_slice, %extracted_slice_0 : tensor<32x258xf32>, tensor<258x128xf32>) outs(%extracted_slice_1 : tensor<32x128xf32>) -> tensor<32x128xf32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %7 into %arg2[%arg0, %arg1] [32, 128] [1, 1] : tensor<32x128xf32> into tensor<128x256xf32>
}
} {mapping = [#iree_codegen.workgroup_mapping<y>, #iree_codegen.workgroup_mapping<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
// -----// IR Dump After CSE (cse) //----- //
// Snapshot after CSE: no duplicate subexpressions existed, so the IR is
// unchanged from the canonicalized form above.
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32() attributes {translation_info = #iree_codegen.translation_info<pipeline = LLVMGPUTileAndFuse workgroup_size = [32, 8, 1] subgroup_size = 32, {gpu_pipeline_options = #iree_gpu.pipeline_options<prefetch_shared_memory = false, no_reduce_shared_memory_bank_conflicts = true, use_igemm_convolution = false>}>} {
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (128, 256) step (32, 128) shared_outs(%arg2 = %5) -> (tensor<128x256xf32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [32, 258] [1, 1] : tensor<128x258xf32> to tensor<32x258xf32>
%extracted_slice_0 = tensor.extract_slice %4[0, %arg1] [258, 128] [1, 1] : tensor<258x256xf32> to tensor<258x128xf32>
%extracted_slice_1 = tensor.extract_slice %arg2[%arg0, %arg1] [32, 128] [1, 1] : tensor<128x256xf32> to tensor<32x128xf32>
%7 = linalg.matmul {lowering_config = #iree_gpu.lowering_config<{promote_operands = [0, 1], reduction = [0, 0, 2], thread = [1, 16, 0], workgroup = [32, 128, 1]}>} ins(%extracted_slice, %extracted_slice_0 : tensor<32x258xf32>, tensor<258x128xf32>) outs(%extracted_slice_1 : tensor<32x128xf32>) -> tensor<32x128xf32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %7 into %arg2[%arg0, %arg1] [32, 128] [1, 1] : tensor<32x128xf32> into tensor<128x256xf32>
}
} {mapping = [#iree_codegen.workgroup_mapping<y>, #iree_codegen.workgroup_mapping<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
// -----// IR Dump After GPUPadOperandsPass (iree-codegen-gpu-pad-operands) //----- //
// Snapshot after GPUPadOperands: no padding inserted at this stage — the
// unaligned K=258 dimension is left as-is here.
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32() attributes {translation_info = #iree_codegen.translation_info<pipeline = LLVMGPUTileAndFuse workgroup_size = [32, 8, 1] subgroup_size = 32, {gpu_pipeline_options = #iree_gpu.pipeline_options<prefetch_shared_memory = false, no_reduce_shared_memory_bank_conflicts = true, use_igemm_convolution = false>}>} {
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (128, 256) step (32, 128) shared_outs(%arg2 = %5) -> (tensor<128x256xf32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [32, 258] [1, 1] : tensor<128x258xf32> to tensor<32x258xf32>
%extracted_slice_0 = tensor.extract_slice %4[0, %arg1] [258, 128] [1, 1] : tensor<258x256xf32> to tensor<258x128xf32>
%extracted_slice_1 = tensor.extract_slice %arg2[%arg0, %arg1] [32, 128] [1, 1] : tensor<128x256xf32> to tensor<32x128xf32>
%7 = linalg.matmul {lowering_config = #iree_gpu.lowering_config<{promote_operands = [0, 1], reduction = [0, 0, 2], thread = [1, 16, 0], workgroup = [32, 128, 1]}>} ins(%extracted_slice, %extracted_slice_0 : tensor<32x258xf32>, tensor<258x128xf32>) outs(%extracted_slice_1 : tensor<32x128xf32>) -> tensor<32x128xf32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %7 into %arg2[%arg0, %arg1] [32, 128] [1, 1] : tensor<32x128xf32> into tensor<128x256xf32>
}
} {mapping = [#iree_codegen.workgroup_mapping<y>, #iree_codegen.workgroup_mapping<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
// -----// IR Dump After GPUPromoteMatmulOperandsPass (iree-codegen-gpu-promote-matmul-operands) //----- //
// Snapshot after GPUPromoteMatmulOperands: per the config's
// promote_operands = [0, 1], both matmul inputs are now staged through
// linalg.copy ops tagged #iree_gpu.derived_thread_config.
// NOTE(review): these copies presumably materialize as shared-memory staging
// after bufferization — confirm against later passes in the dump.
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32() attributes {translation_info = #iree_codegen.translation_info<pipeline = LLVMGPUTileAndFuse workgroup_size = [32, 8, 1] subgroup_size = 32, {gpu_pipeline_options = #iree_gpu.pipeline_options<prefetch_shared_memory = false, no_reduce_shared_memory_bank_conflicts = true, use_igemm_convolution = false>}>} {
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (128, 256) step (32, 128) shared_outs(%arg2 = %5) -> (tensor<128x256xf32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [32, 258] [1, 1] : tensor<128x258xf32> to tensor<32x258xf32>
%extracted_slice_0 = tensor.extract_slice %4[0, %arg1] [258, 128] [1, 1] : tensor<258x256xf32> to tensor<258x128xf32>
%extracted_slice_1 = tensor.extract_slice %arg2[%arg0, %arg1] [32, 128] [1, 1] : tensor<128x256xf32> to tensor<32x128xf32>
// Promotion copy of the 32x258 LHS tile.
%7 = tensor.empty() : tensor<32x258xf32>
%8 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice : tensor<32x258xf32>) outs(%7 : tensor<32x258xf32>) -> tensor<32x258xf32>
// Promotion copy of the 258x128 RHS tile.
%9 = tensor.empty() : tensor<258x128xf32>
%10 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_0 : tensor<258x128xf32>) outs(%9 : tensor<258x128xf32>) -> tensor<258x128xf32>
%11 = linalg.matmul {lowering_config = #iree_gpu.lowering_config<{promote_operands = [0, 1], reduction = [0, 0, 2], thread = [1, 16, 0], workgroup = [32, 128, 1]}>} ins(%8, %10 : tensor<32x258xf32>, tensor<258x128xf32>) outs(%extracted_slice_1 : tensor<32x128xf32>) -> tensor<32x128xf32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %11 into %arg2[%arg0, %arg1] [32, 128] [1, 1] : tensor<32x128xf32> into tensor<128x256xf32>
}
} {mapping = [#iree_codegen.workgroup_mapping<y>, #iree_codegen.workgroup_mapping<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
// -----// IR Dump After GPUPackToIntrinsicsPass (iree-codegen-gpu-pack-to-intrinsics) //----- //
// Snapshot after GPUPackToIntrinsics: unchanged — no MMA intrinsic packing was
// applied (consistent with the absence of an mma_kind in the lowering config).
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32() attributes {translation_info = #iree_codegen.translation_info<pipeline = LLVMGPUTileAndFuse workgroup_size = [32, 8, 1] subgroup_size = 32, {gpu_pipeline_options = #iree_gpu.pipeline_options<prefetch_shared_memory = false, no_reduce_shared_memory_bank_conflicts = true, use_igemm_convolution = false>}>} {
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (128, 256) step (32, 128) shared_outs(%arg2 = %5) -> (tensor<128x256xf32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [32, 258] [1, 1] : tensor<128x258xf32> to tensor<32x258xf32>
%extracted_slice_0 = tensor.extract_slice %4[0, %arg1] [258, 128] [1, 1] : tensor<258x256xf32> to tensor<258x128xf32>
%extracted_slice_1 = tensor.extract_slice %arg2[%arg0, %arg1] [32, 128] [1, 1] : tensor<128x256xf32> to tensor<32x128xf32>
%7 = tensor.empty() : tensor<32x258xf32>
%8 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice : tensor<32x258xf32>) outs(%7 : tensor<32x258xf32>) -> tensor<32x258xf32>
%9 = tensor.empty() : tensor<258x128xf32>
%10 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_0 : tensor<258x128xf32>) outs(%9 : tensor<258x128xf32>) -> tensor<258x128xf32>
%11 = linalg.matmul {lowering_config = #iree_gpu.lowering_config<{promote_operands = [0, 1], reduction = [0, 0, 2], thread = [1, 16, 0], workgroup = [32, 128, 1]}>} ins(%8, %10 : tensor<32x258xf32>, tensor<258x128xf32>) outs(%extracted_slice_1 : tensor<32x128xf32>) -> tensor<32x128xf32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %11 into %arg2[%arg0, %arg1] [32, 128] [1, 1] : tensor<32x128xf32> into tensor<128x256xf32>
}
} {mapping = [#iree_codegen.workgroup_mapping<y>, #iree_codegen.workgroup_mapping<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
// -----// IR Dump After DecomposeBoundaryPackUnPackOpsPass (iree-codegen-decompose-boundary-pack-unpack-ops) //----- //
// Snapshot after DecomposeBoundaryPackUnPackOps: unchanged — the IR contains
// no pack/unpack ops at dispatch boundaries.
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32() attributes {translation_info = #iree_codegen.translation_info<pipeline = LLVMGPUTileAndFuse workgroup_size = [32, 8, 1] subgroup_size = 32, {gpu_pipeline_options = #iree_gpu.pipeline_options<prefetch_shared_memory = false, no_reduce_shared_memory_bank_conflicts = true, use_igemm_convolution = false>}>} {
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (128, 256) step (32, 128) shared_outs(%arg2 = %5) -> (tensor<128x256xf32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [32, 258] [1, 1] : tensor<128x258xf32> to tensor<32x258xf32>
%extracted_slice_0 = tensor.extract_slice %4[0, %arg1] [258, 128] [1, 1] : tensor<258x256xf32> to tensor<258x128xf32>
%extracted_slice_1 = tensor.extract_slice %arg2[%arg0, %arg1] [32, 128] [1, 1] : tensor<128x256xf32> to tensor<32x128xf32>
%7 = tensor.empty() : tensor<32x258xf32>
%8 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice : tensor<32x258xf32>) outs(%7 : tensor<32x258xf32>) -> tensor<32x258xf32>
%9 = tensor.empty() : tensor<258x128xf32>
%10 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_0 : tensor<258x128xf32>) outs(%9 : tensor<258x128xf32>) -> tensor<258x128xf32>
%11 = linalg.matmul {lowering_config = #iree_gpu.lowering_config<{promote_operands = [0, 1], reduction = [0, 0, 2], thread = [1, 16, 0], workgroup = [32, 128, 1]}>} ins(%8, %10 : tensor<32x258xf32>, tensor<258x128xf32>) outs(%extracted_slice_1 : tensor<32x128xf32>) -> tensor<32x128xf32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %11 into %arg2[%arg0, %arg1] [32, 128] [1, 1] : tensor<32x128xf32> into tensor<128x256xf32>
}
} {mapping = [#iree_codegen.workgroup_mapping<y>, #iree_codegen.workgroup_mapping<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
// -----// IR Dump After ConcretizeMmaShapesPass (iree-gpu-concretize-mma-shapes) //----- //
// Snapshot after ConcretizeMmaShapes: unchanged — no MMA-based multi_mma ops
// are present for the pass to concretize.
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32() attributes {translation_info = #iree_codegen.translation_info<pipeline = LLVMGPUTileAndFuse workgroup_size = [32, 8, 1] subgroup_size = 32, {gpu_pipeline_options = #iree_gpu.pipeline_options<prefetch_shared_memory = false, no_reduce_shared_memory_bank_conflicts = true, use_igemm_convolution = false>}>} {
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (128, 256) step (32, 128) shared_outs(%arg2 = %5) -> (tensor<128x256xf32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [32, 258] [1, 1] : tensor<128x258xf32> to tensor<32x258xf32>
%extracted_slice_0 = tensor.extract_slice %4[0, %arg1] [258, 128] [1, 1] : tensor<258x256xf32> to tensor<258x128xf32>
%extracted_slice_1 = tensor.extract_slice %arg2[%arg0, %arg1] [32, 128] [1, 1] : tensor<128x256xf32> to tensor<32x128xf32>
%7 = tensor.empty() : tensor<32x258xf32>
%8 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice : tensor<32x258xf32>) outs(%7 : tensor<32x258xf32>) -> tensor<32x258xf32>
%9 = tensor.empty() : tensor<258x128xf32>
%10 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_0 : tensor<258x128xf32>) outs(%9 : tensor<258x128xf32>) -> tensor<258x128xf32>
%11 = linalg.matmul {lowering_config = #iree_gpu.lowering_config<{promote_operands = [0, 1], reduction = [0, 0, 2], thread = [1, 16, 0], workgroup = [32, 128, 1]}>} ins(%8, %10 : tensor<32x258xf32>, tensor<258x128xf32>) outs(%extracted_slice_1 : tensor<32x128xf32>) -> tensor<32x128xf32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %11 into %arg2[%arg0, %arg1] [32, 128] [1, 1] : tensor<32x128xf32> into tensor<128x256xf32>
}
} {mapping = [#iree_codegen.workgroup_mapping<y>, #iree_codegen.workgroup_mapping<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
// -----// IR Dump After PropagateReshapesByExpansionPass (iree-codegen-propagate-reshapes-by-expansion) //----- //
// Snapshot after PropagateReshapesByExpansion: unchanged — there are no
// reshape (expand/collapse) ops to propagate. Next pass in the dump
// (GPUApplyTilingLevel) introduces the K-loop tiling.
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32() attributes {translation_info = #iree_codegen.translation_info<pipeline = LLVMGPUTileAndFuse workgroup_size = [32, 8, 1] subgroup_size = 32, {gpu_pipeline_options = #iree_gpu.pipeline_options<prefetch_shared_memory = false, no_reduce_shared_memory_bank_conflicts = true, use_igemm_convolution = false>}>} {
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (128, 256) step (32, 128) shared_outs(%arg2 = %5) -> (tensor<128x256xf32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [32, 258] [1, 1] : tensor<128x258xf32> to tensor<32x258xf32>
%extracted_slice_0 = tensor.extract_slice %4[0, %arg1] [258, 128] [1, 1] : tensor<258x256xf32> to tensor<258x128xf32>
%extracted_slice_1 = tensor.extract_slice %arg2[%arg0, %arg1] [32, 128] [1, 1] : tensor<128x256xf32> to tensor<32x128xf32>
%7 = tensor.empty() : tensor<32x258xf32>
%8 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice : tensor<32x258xf32>) outs(%7 : tensor<32x258xf32>) -> tensor<32x258xf32>
%9 = tensor.empty() : tensor<258x128xf32>
%10 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_0 : tensor<258x128xf32>) outs(%9 : tensor<258x128xf32>) -> tensor<258x128xf32>
%11 = linalg.matmul {lowering_config = #iree_gpu.lowering_config<{promote_operands = [0, 1], reduction = [0, 0, 2], thread = [1, 16, 0], workgroup = [32, 128, 1]}>} ins(%8, %10 : tensor<32x258xf32>, tensor<258x128xf32>) outs(%extracted_slice_1 : tensor<32x128xf32>) -> tensor<32x128xf32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %11 into %arg2[%arg0, %arg1] [32, 128] [1, 1] : tensor<32x128xf32> into tensor<128x256xf32>
}
} {mapping = [#iree_codegen.workgroup_mapping<y>, #iree_codegen.workgroup_mapping<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
// -----// IR Dump After GPUApplyTilingLevelPass (iree-codegen-gpu-apply-tiling-level) //----- //
// IR snapshot after GPUApplyTilingLevelPass (iree-codegen-gpu-apply-tiling-level):
// GPU dispatch computing C[128x256] = A[128x258] * B[258x256] in f32 under the
// LLVMGPUTileAndFuse pipeline (workgroup_size = [32, 8, 1], subgroup_size = 32).
// The output is tiled into 32x128 workgroup tiles and the K dimension (258) is
// reduced in steps of 2 -- the "unaligned K" case this dispatch is named for.
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32() attributes {translation_info = #iree_codegen.translation_info<pipeline = LLVMGPUTileAndFuse workgroup_size = [32, 8, 1] subgroup_size = 32, {gpu_pipeline_options = #iree_gpu.pipeline_options<prefetch_shared_memory = false, no_reduce_shared_memory_bank_conflicts = true, use_igemm_convolution = false>}>} {
// K-tile step, K extent, and zero offset used by the loops below.
%c2 = arith.constant 2 : index
%c258 = arith.constant 258 : index
%c0 = arith.constant 0 : index
// Bindings 0 and 1 are the read-only A (128x258) and B (258x256) inputs;
// binding 2 is the write-only 128x256 result.
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
// Distribute 32x128 output tiles across workgroups; per the mapping attribute
// on this forall, the first loop (M) maps to workgroup y, the second (N) to x.
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (128, 256) step (32, 128) shared_outs(%arg2 = %5) -> (tensor<128x256xf32>) {
%extracted_slice = tensor.extract_slice %arg2[%arg0, %arg1] [32, 128] [1, 1] : tensor<128x256xf32> to tensor<32x128xf32>
// Serial reduction over K = 258 in steps of 2 (reduction = [0, 0, 2] in the
// matmul's lowering config), accumulating into the 32x128 output tile.
%7 = scf.for %arg3 = %c0 to %c258 step %c2 iter_args(%arg4 = %extracted_slice) -> (tensor<32x128xf32>) {
// Stage the 32x2 A tile and 2x128 B tile into fresh buffers before the
// matmul; these copies stem from operand promotion (promote_operands = [0, 1]).
%extracted_slice_0 = tensor.extract_slice %3[%arg0, %arg3] [32, 2] [1, 1] : tensor<128x258xf32> to tensor<32x2xf32>
%8 = tensor.empty() : tensor<32x2xf32>
%9 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_0 : tensor<32x2xf32>) outs(%8 : tensor<32x2xf32>) -> tensor<32x2xf32>
%extracted_slice_1 = tensor.extract_slice %4[%arg3, %arg1] [2, 128] [1, 1] : tensor<258x256xf32> to tensor<2x128xf32>
%10 = tensor.empty() : tensor<2x128xf32>
%11 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_1 : tensor<2x128xf32>) outs(%10 : tensor<2x128xf32>) -> tensor<2x128xf32>
%12 = linalg.matmul {lowering_config = #iree_gpu.lowering_config<{promote_operands = [0, 1], reduction = [0, 0, 2], thread = [1, 16, 0], workgroup = [32, 128, 1]}>} ins(%9, %11 : tensor<32x2xf32>, tensor<2x128xf32>) outs(%arg4 : tensor<32x128xf32>) -> tensor<32x128xf32>
scf.yield %12 : tensor<32x128xf32>
}
// Commit the finished 32x128 tile into this workgroup's slice of the output.
scf.forall.in_parallel {
tensor.parallel_insert_slice %7 into %arg2[%arg0, %arg1] [32, 128] [1, 1] : tensor<32x128xf32> into tensor<128x256xf32>
}
} {mapping = [#iree_codegen.workgroup_mapping<y>, #iree_codegen.workgroup_mapping<x>]}
// Store the fully assembled 128x256 result to the write-only binding.
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
// -----// IR Dump After ConfigTrackingCanonicalizerPass (iree-codegen-config-tracking-canonicalize) //----- //
// IR snapshot after ConfigTrackingCanonicalizerPass (iree-codegen-config-tracking-canonicalize):
// GPU dispatch computing C[128x256] = A[128x258] * B[258x256] in f32 under the
// LLVMGPUTileAndFuse pipeline (workgroup_size = [32, 8, 1], subgroup_size = 32).
// The output is tiled into 32x128 workgroup tiles and the K dimension (258) is
// reduced in steps of 2 -- the "unaligned K" case this dispatch is named for.
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32() attributes {translation_info = #iree_codegen.translation_info<pipeline = LLVMGPUTileAndFuse workgroup_size = [32, 8, 1] subgroup_size = 32, {gpu_pipeline_options = #iree_gpu.pipeline_options<prefetch_shared_memory = false, no_reduce_shared_memory_bank_conflicts = true, use_igemm_convolution = false>}>} {
// K-tile step, K extent, and zero offset used by the loops below.
%c2 = arith.constant 2 : index
%c258 = arith.constant 258 : index
%c0 = arith.constant 0 : index
// Bindings 0 and 1 are the read-only A (128x258) and B (258x256) inputs;
// binding 2 is the write-only 128x256 result.
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
// Distribute 32x128 output tiles across workgroups; per the mapping attribute
// on this forall, the first loop (M) maps to workgroup y, the second (N) to x.
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (128, 256) step (32, 128) shared_outs(%arg2 = %5) -> (tensor<128x256xf32>) {
%extracted_slice = tensor.extract_slice %arg2[%arg0, %arg1] [32, 128] [1, 1] : tensor<128x256xf32> to tensor<32x128xf32>
// Serial reduction over K = 258 in steps of 2 (reduction = [0, 0, 2] in the
// matmul's lowering config), accumulating into the 32x128 output tile.
%7 = scf.for %arg3 = %c0 to %c258 step %c2 iter_args(%arg4 = %extracted_slice) -> (tensor<32x128xf32>) {
// Stage the 32x2 A tile and 2x128 B tile into fresh buffers before the
// matmul; these copies stem from operand promotion (promote_operands = [0, 1]).
%extracted_slice_0 = tensor.extract_slice %3[%arg0, %arg3] [32, 2] [1, 1] : tensor<128x258xf32> to tensor<32x2xf32>
%8 = tensor.empty() : tensor<32x2xf32>
%9 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_0 : tensor<32x2xf32>) outs(%8 : tensor<32x2xf32>) -> tensor<32x2xf32>
%extracted_slice_1 = tensor.extract_slice %4[%arg3, %arg1] [2, 128] [1, 1] : tensor<258x256xf32> to tensor<2x128xf32>
%10 = tensor.empty() : tensor<2x128xf32>
%11 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_1 : tensor<2x128xf32>) outs(%10 : tensor<2x128xf32>) -> tensor<2x128xf32>
%12 = linalg.matmul {lowering_config = #iree_gpu.lowering_config<{promote_operands = [0, 1], reduction = [0, 0, 2], thread = [1, 16, 0], workgroup = [32, 128, 1]}>} ins(%9, %11 : tensor<32x2xf32>, tensor<2x128xf32>) outs(%arg4 : tensor<32x128xf32>) -> tensor<32x128xf32>
scf.yield %12 : tensor<32x128xf32>
}
// Commit the finished 32x128 tile into this workgroup's slice of the output.
scf.forall.in_parallel {
tensor.parallel_insert_slice %7 into %arg2[%arg0, %arg1] [32, 128] [1, 1] : tensor<32x128xf32> into tensor<128x256xf32>
}
} {mapping = [#iree_codegen.workgroup_mapping<y>, #iree_codegen.workgroup_mapping<x>]}
// Store the fully assembled 128x256 result to the write-only binding.
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
// -----// IR Dump After CSE (cse) //----- //
// IR snapshot after CSE (cse):
// GPU dispatch computing C[128x256] = A[128x258] * B[258x256] in f32 under the
// LLVMGPUTileAndFuse pipeline (workgroup_size = [32, 8, 1], subgroup_size = 32).
// The output is tiled into 32x128 workgroup tiles and the K dimension (258) is
// reduced in steps of 2 -- the "unaligned K" case this dispatch is named for.
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32() attributes {translation_info = #iree_codegen.translation_info<pipeline = LLVMGPUTileAndFuse workgroup_size = [32, 8, 1] subgroup_size = 32, {gpu_pipeline_options = #iree_gpu.pipeline_options<prefetch_shared_memory = false, no_reduce_shared_memory_bank_conflicts = true, use_igemm_convolution = false>}>} {
// K-tile step, K extent, and zero offset used by the loops below.
%c2 = arith.constant 2 : index
%c258 = arith.constant 258 : index
%c0 = arith.constant 0 : index
// Bindings 0 and 1 are the read-only A (128x258) and B (258x256) inputs;
// binding 2 is the write-only 128x256 result.
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
// Distribute 32x128 output tiles across workgroups; per the mapping attribute
// on this forall, the first loop (M) maps to workgroup y, the second (N) to x.
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (128, 256) step (32, 128) shared_outs(%arg2 = %5) -> (tensor<128x256xf32>) {
%extracted_slice = tensor.extract_slice %arg2[%arg0, %arg1] [32, 128] [1, 1] : tensor<128x256xf32> to tensor<32x128xf32>
// Serial reduction over K = 258 in steps of 2 (reduction = [0, 0, 2] in the
// matmul's lowering config), accumulating into the 32x128 output tile.
%7 = scf.for %arg3 = %c0 to %c258 step %c2 iter_args(%arg4 = %extracted_slice) -> (tensor<32x128xf32>) {
// Stage the 32x2 A tile and 2x128 B tile into fresh buffers before the
// matmul; these copies stem from operand promotion (promote_operands = [0, 1]).
%extracted_slice_0 = tensor.extract_slice %3[%arg0, %arg3] [32, 2] [1, 1] : tensor<128x258xf32> to tensor<32x2xf32>
%8 = tensor.empty() : tensor<32x2xf32>
%9 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_0 : tensor<32x2xf32>) outs(%8 : tensor<32x2xf32>) -> tensor<32x2xf32>
%extracted_slice_1 = tensor.extract_slice %4[%arg3, %arg1] [2, 128] [1, 1] : tensor<258x256xf32> to tensor<2x128xf32>
%10 = tensor.empty() : tensor<2x128xf32>
%11 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_1 : tensor<2x128xf32>) outs(%10 : tensor<2x128xf32>) -> tensor<2x128xf32>
%12 = linalg.matmul {lowering_config = #iree_gpu.lowering_config<{promote_operands = [0, 1], reduction = [0, 0, 2], thread = [1, 16, 0], workgroup = [32, 128, 1]}>} ins(%9, %11 : tensor<32x2xf32>, tensor<2x128xf32>) outs(%arg4 : tensor<32x128xf32>) -> tensor<32x128xf32>
scf.yield %12 : tensor<32x128xf32>
}
// Commit the finished 32x128 tile into this workgroup's slice of the output.
scf.forall.in_parallel {
tensor.parallel_insert_slice %7 into %arg2[%arg0, %arg1] [32, 128] [1, 1] : tensor<32x128xf32> into tensor<128x256xf32>
}
} {mapping = [#iree_codegen.workgroup_mapping<y>, #iree_codegen.workgroup_mapping<x>]}
// Store the fully assembled 128x256 result to the write-only binding.
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
// -----// IR Dump After DecomposePackUnPackOpsPass (iree-codegen-decompose-pack-unpack-ops) //----- //
// IR snapshot after DecomposePackUnPackOpsPass (iree-codegen-decompose-pack-unpack-ops):
// GPU dispatch computing C[128x256] = A[128x258] * B[258x256] in f32 under the
// LLVMGPUTileAndFuse pipeline (workgroup_size = [32, 8, 1], subgroup_size = 32).
// The output is tiled into 32x128 workgroup tiles and the K dimension (258) is
// reduced in steps of 2 -- the "unaligned K" case this dispatch is named for.
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32() attributes {translation_info = #iree_codegen.translation_info<pipeline = LLVMGPUTileAndFuse workgroup_size = [32, 8, 1] subgroup_size = 32, {gpu_pipeline_options = #iree_gpu.pipeline_options<prefetch_shared_memory = false, no_reduce_shared_memory_bank_conflicts = true, use_igemm_convolution = false>}>} {
// K-tile step, K extent, and zero offset used by the loops below.
%c2 = arith.constant 2 : index
%c258 = arith.constant 258 : index
%c0 = arith.constant 0 : index
// Bindings 0 and 1 are the read-only A (128x258) and B (258x256) inputs;
// binding 2 is the write-only 128x256 result.
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
// Distribute 32x128 output tiles across workgroups; per the mapping attribute
// on this forall, the first loop (M) maps to workgroup y, the second (N) to x.
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (128, 256) step (32, 128) shared_outs(%arg2 = %5) -> (tensor<128x256xf32>) {
%extracted_slice = tensor.extract_slice %arg2[%arg0, %arg1] [32, 128] [1, 1] : tensor<128x256xf32> to tensor<32x128xf32>
// Serial reduction over K = 258 in steps of 2 (reduction = [0, 0, 2] in the
// matmul's lowering config), accumulating into the 32x128 output tile.
%7 = scf.for %arg3 = %c0 to %c258 step %c2 iter_args(%arg4 = %extracted_slice) -> (tensor<32x128xf32>) {
// Stage the 32x2 A tile and 2x128 B tile into fresh buffers before the
// matmul; these copies stem from operand promotion (promote_operands = [0, 1]).
%extracted_slice_0 = tensor.extract_slice %3[%arg0, %arg3] [32, 2] [1, 1] : tensor<128x258xf32> to tensor<32x2xf32>
%8 = tensor.empty() : tensor<32x2xf32>
%9 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_0 : tensor<32x2xf32>) outs(%8 : tensor<32x2xf32>) -> tensor<32x2xf32>
%extracted_slice_1 = tensor.extract_slice %4[%arg3, %arg1] [2, 128] [1, 1] : tensor<258x256xf32> to tensor<2x128xf32>
%10 = tensor.empty() : tensor<2x128xf32>
%11 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_1 : tensor<2x128xf32>) outs(%10 : tensor<2x128xf32>) -> tensor<2x128xf32>
%12 = linalg.matmul {lowering_config = #iree_gpu.lowering_config<{promote_operands = [0, 1], reduction = [0, 0, 2], thread = [1, 16, 0], workgroup = [32, 128, 1]}>} ins(%9, %11 : tensor<32x2xf32>, tensor<2x128xf32>) outs(%arg4 : tensor<32x128xf32>) -> tensor<32x128xf32>
scf.yield %12 : tensor<32x128xf32>
}
// Commit the finished 32x128 tile into this workgroup's slice of the output.
scf.forall.in_parallel {
tensor.parallel_insert_slice %7 into %arg2[%arg0, %arg1] [32, 128] [1, 1] : tensor<32x128xf32> into tensor<128x256xf32>
}
} {mapping = [#iree_codegen.workgroup_mapping<y>, #iree_codegen.workgroup_mapping<x>]}
// Store the fully assembled 128x256 result to the write-only binding.
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
// -----// IR Dump After ConcretizeMmaShapesPass (iree-gpu-concretize-mma-shapes) //----- //
// IR snapshot after ConcretizeMmaShapesPass (iree-gpu-concretize-mma-shapes):
// GPU dispatch computing C[128x256] = A[128x258] * B[258x256] in f32 under the
// LLVMGPUTileAndFuse pipeline (workgroup_size = [32, 8, 1], subgroup_size = 32).
// The output is tiled into 32x128 workgroup tiles and the K dimension (258) is
// reduced in steps of 2 -- the "unaligned K" case this dispatch is named for.
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32() attributes {translation_info = #iree_codegen.translation_info<pipeline = LLVMGPUTileAndFuse workgroup_size = [32, 8, 1] subgroup_size = 32, {gpu_pipeline_options = #iree_gpu.pipeline_options<prefetch_shared_memory = false, no_reduce_shared_memory_bank_conflicts = true, use_igemm_convolution = false>}>} {
// K-tile step, K extent, and zero offset used by the loops below.
%c2 = arith.constant 2 : index
%c258 = arith.constant 258 : index
%c0 = arith.constant 0 : index
// Bindings 0 and 1 are the read-only A (128x258) and B (258x256) inputs;
// binding 2 is the write-only 128x256 result.
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
// Distribute 32x128 output tiles across workgroups; per the mapping attribute
// on this forall, the first loop (M) maps to workgroup y, the second (N) to x.
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (128, 256) step (32, 128) shared_outs(%arg2 = %5) -> (tensor<128x256xf32>) {
%extracted_slice = tensor.extract_slice %arg2[%arg0, %arg1] [32, 128] [1, 1] : tensor<128x256xf32> to tensor<32x128xf32>
// Serial reduction over K = 258 in steps of 2 (reduction = [0, 0, 2] in the
// matmul's lowering config), accumulating into the 32x128 output tile.
%7 = scf.for %arg3 = %c0 to %c258 step %c2 iter_args(%arg4 = %extracted_slice) -> (tensor<32x128xf32>) {
// Stage the 32x2 A tile and 2x128 B tile into fresh buffers before the
// matmul; these copies stem from operand promotion (promote_operands = [0, 1]).
%extracted_slice_0 = tensor.extract_slice %3[%arg0, %arg3] [32, 2] [1, 1] : tensor<128x258xf32> to tensor<32x2xf32>
%8 = tensor.empty() : tensor<32x2xf32>
%9 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_0 : tensor<32x2xf32>) outs(%8 : tensor<32x2xf32>) -> tensor<32x2xf32>
%extracted_slice_1 = tensor.extract_slice %4[%arg3, %arg1] [2, 128] [1, 1] : tensor<258x256xf32> to tensor<2x128xf32>
%10 = tensor.empty() : tensor<2x128xf32>
%11 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_1 : tensor<2x128xf32>) outs(%10 : tensor<2x128xf32>) -> tensor<2x128xf32>
%12 = linalg.matmul {lowering_config = #iree_gpu.lowering_config<{promote_operands = [0, 1], reduction = [0, 0, 2], thread = [1, 16, 0], workgroup = [32, 128, 1]}>} ins(%9, %11 : tensor<32x2xf32>, tensor<2x128xf32>) outs(%arg4 : tensor<32x128xf32>) -> tensor<32x128xf32>
scf.yield %12 : tensor<32x128xf32>
}
// Commit the finished 32x128 tile into this workgroup's slice of the output.
scf.forall.in_parallel {
tensor.parallel_insert_slice %7 into %arg2[%arg0, %arg1] [32, 128] [1, 1] : tensor<32x128xf32> into tensor<128x256xf32>
}
} {mapping = [#iree_codegen.workgroup_mapping<y>, #iree_codegen.workgroup_mapping<x>]}
// Store the fully assembled 128x256 result to the write-only binding.
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
// -----// IR Dump After PropagateReshapesByExpansionPass (iree-codegen-propagate-reshapes-by-expansion) //----- //
// IR snapshot after PropagateReshapesByExpansionPass (iree-codegen-propagate-reshapes-by-expansion):
// GPU dispatch computing C[128x256] = A[128x258] * B[258x256] in f32 under the
// LLVMGPUTileAndFuse pipeline (workgroup_size = [32, 8, 1], subgroup_size = 32).
// The output is tiled into 32x128 workgroup tiles and the K dimension (258) is
// reduced in steps of 2 -- the "unaligned K" case this dispatch is named for.
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32() attributes {translation_info = #iree_codegen.translation_info<pipeline = LLVMGPUTileAndFuse workgroup_size = [32, 8, 1] subgroup_size = 32, {gpu_pipeline_options = #iree_gpu.pipeline_options<prefetch_shared_memory = false, no_reduce_shared_memory_bank_conflicts = true, use_igemm_convolution = false>}>} {
// K-tile step, K extent, and zero offset used by the loops below.
%c2 = arith.constant 2 : index
%c258 = arith.constant 258 : index
%c0 = arith.constant 0 : index
// Bindings 0 and 1 are the read-only A (128x258) and B (258x256) inputs;
// binding 2 is the write-only 128x256 result.
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
// Distribute 32x128 output tiles across workgroups; per the mapping attribute
// on this forall, the first loop (M) maps to workgroup y, the second (N) to x.
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (128, 256) step (32, 128) shared_outs(%arg2 = %5) -> (tensor<128x256xf32>) {
%extracted_slice = tensor.extract_slice %arg2[%arg0, %arg1] [32, 128] [1, 1] : tensor<128x256xf32> to tensor<32x128xf32>
// Serial reduction over K = 258 in steps of 2 (reduction = [0, 0, 2] in the
// matmul's lowering config), accumulating into the 32x128 output tile.
%7 = scf.for %arg3 = %c0 to %c258 step %c2 iter_args(%arg4 = %extracted_slice) -> (tensor<32x128xf32>) {
// Stage the 32x2 A tile and 2x128 B tile into fresh buffers before the
// matmul; these copies stem from operand promotion (promote_operands = [0, 1]).
%extracted_slice_0 = tensor.extract_slice %3[%arg0, %arg3] [32, 2] [1, 1] : tensor<128x258xf32> to tensor<32x2xf32>
%8 = tensor.empty() : tensor<32x2xf32>
%9 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_0 : tensor<32x2xf32>) outs(%8 : tensor<32x2xf32>) -> tensor<32x2xf32>
%extracted_slice_1 = tensor.extract_slice %4[%arg3, %arg1] [2, 128] [1, 1] : tensor<258x256xf32> to tensor<2x128xf32>
%10 = tensor.empty() : tensor<2x128xf32>
%11 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_1 : tensor<2x128xf32>) outs(%10 : tensor<2x128xf32>) -> tensor<2x128xf32>
%12 = linalg.matmul {lowering_config = #iree_gpu.lowering_config<{promote_operands = [0, 1], reduction = [0, 0, 2], thread = [1, 16, 0], workgroup = [32, 128, 1]}>} ins(%9, %11 : tensor<32x2xf32>, tensor<2x128xf32>) outs(%arg4 : tensor<32x128xf32>) -> tensor<32x128xf32>
scf.yield %12 : tensor<32x128xf32>
}
// Commit the finished 32x128 tile into this workgroup's slice of the output.
scf.forall.in_parallel {
tensor.parallel_insert_slice %7 into %arg2[%arg0, %arg1] [32, 128] [1, 1] : tensor<32x128xf32> into tensor<128x256xf32>
}
} {mapping = [#iree_codegen.workgroup_mapping<y>, #iree_codegen.workgroup_mapping<x>]}
// Store the fully assembled 128x256 result to the write-only binding.
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
// -----// IR Dump After ConfigTrackingCanonicalizerPass (iree-codegen-config-tracking-canonicalize) //----- //
// IR snapshot after ConfigTrackingCanonicalizerPass (iree-codegen-config-tracking-canonicalize):
// GPU dispatch computing C[128x256] = A[128x258] * B[258x256] in f32 under the
// LLVMGPUTileAndFuse pipeline (workgroup_size = [32, 8, 1], subgroup_size = 32).
// The output is tiled into 32x128 workgroup tiles and the K dimension (258) is
// reduced in steps of 2 -- the "unaligned K" case this dispatch is named for.
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32() attributes {translation_info = #iree_codegen.translation_info<pipeline = LLVMGPUTileAndFuse workgroup_size = [32, 8, 1] subgroup_size = 32, {gpu_pipeline_options = #iree_gpu.pipeline_options<prefetch_shared_memory = false, no_reduce_shared_memory_bank_conflicts = true, use_igemm_convolution = false>}>} {
// K-tile step, K extent, and zero offset used by the loops below.
%c2 = arith.constant 2 : index
%c258 = arith.constant 258 : index
%c0 = arith.constant 0 : index
// Bindings 0 and 1 are the read-only A (128x258) and B (258x256) inputs;
// binding 2 is the write-only 128x256 result.
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
// Distribute 32x128 output tiles across workgroups; per the mapping attribute
// on this forall, the first loop (M) maps to workgroup y, the second (N) to x.
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (128, 256) step (32, 128) shared_outs(%arg2 = %5) -> (tensor<128x256xf32>) {
%extracted_slice = tensor.extract_slice %arg2[%arg0, %arg1] [32, 128] [1, 1] : tensor<128x256xf32> to tensor<32x128xf32>
// Serial reduction over K = 258 in steps of 2 (reduction = [0, 0, 2] in the
// matmul's lowering config), accumulating into the 32x128 output tile.
%7 = scf.for %arg3 = %c0 to %c258 step %c2 iter_args(%arg4 = %extracted_slice) -> (tensor<32x128xf32>) {
// Stage the 32x2 A tile and 2x128 B tile into fresh buffers before the
// matmul; these copies stem from operand promotion (promote_operands = [0, 1]).
%extracted_slice_0 = tensor.extract_slice %3[%arg0, %arg3] [32, 2] [1, 1] : tensor<128x258xf32> to tensor<32x2xf32>
%8 = tensor.empty() : tensor<32x2xf32>
%9 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_0 : tensor<32x2xf32>) outs(%8 : tensor<32x2xf32>) -> tensor<32x2xf32>
%extracted_slice_1 = tensor.extract_slice %4[%arg3, %arg1] [2, 128] [1, 1] : tensor<258x256xf32> to tensor<2x128xf32>
%10 = tensor.empty() : tensor<2x128xf32>
%11 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_1 : tensor<2x128xf32>) outs(%10 : tensor<2x128xf32>) -> tensor<2x128xf32>
%12 = linalg.matmul {lowering_config = #iree_gpu.lowering_config<{promote_operands = [0, 1], reduction = [0, 0, 2], thread = [1, 16, 0], workgroup = [32, 128, 1]}>} ins(%9, %11 : tensor<32x2xf32>, tensor<2x128xf32>) outs(%arg4 : tensor<32x128xf32>) -> tensor<32x128xf32>
scf.yield %12 : tensor<32x128xf32>
}
// Commit the finished 32x128 tile into this workgroup's slice of the output.
scf.forall.in_parallel {
tensor.parallel_insert_slice %7 into %arg2[%arg0, %arg1] [32, 128] [1, 1] : tensor<32x128xf32> into tensor<128x256xf32>
}
} {mapping = [#iree_codegen.workgroup_mapping<y>, #iree_codegen.workgroup_mapping<x>]}
// Store the fully assembled 128x256 result to the write-only binding.
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
// -----// IR Dump After CSE (cse) //----- //
// IR snapshot after CSE (cse):
// GPU dispatch computing C[128x256] = A[128x258] * B[258x256] in f32 under the
// LLVMGPUTileAndFuse pipeline (workgroup_size = [32, 8, 1], subgroup_size = 32).
// The output is tiled into 32x128 workgroup tiles and the K dimension (258) is
// reduced in steps of 2 -- the "unaligned K" case this dispatch is named for.
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32() attributes {translation_info = #iree_codegen.translation_info<pipeline = LLVMGPUTileAndFuse workgroup_size = [32, 8, 1] subgroup_size = 32, {gpu_pipeline_options = #iree_gpu.pipeline_options<prefetch_shared_memory = false, no_reduce_shared_memory_bank_conflicts = true, use_igemm_convolution = false>}>} {
// K-tile step, K extent, and zero offset used by the loops below.
%c2 = arith.constant 2 : index
%c258 = arith.constant 258 : index
%c0 = arith.constant 0 : index
// Bindings 0 and 1 are the read-only A (128x258) and B (258x256) inputs;
// binding 2 is the write-only 128x256 result.
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
// Distribute 32x128 output tiles across workgroups; per the mapping attribute
// on this forall, the first loop (M) maps to workgroup y, the second (N) to x.
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (128, 256) step (32, 128) shared_outs(%arg2 = %5) -> (tensor<128x256xf32>) {
%extracted_slice = tensor.extract_slice %arg2[%arg0, %arg1] [32, 128] [1, 1] : tensor<128x256xf32> to tensor<32x128xf32>
// Serial reduction over K = 258 in steps of 2 (reduction = [0, 0, 2] in the
// matmul's lowering config), accumulating into the 32x128 output tile.
%7 = scf.for %arg3 = %c0 to %c258 step %c2 iter_args(%arg4 = %extracted_slice) -> (tensor<32x128xf32>) {
// Stage the 32x2 A tile and 2x128 B tile into fresh buffers before the
// matmul; these copies stem from operand promotion (promote_operands = [0, 1]).
%extracted_slice_0 = tensor.extract_slice %3[%arg0, %arg3] [32, 2] [1, 1] : tensor<128x258xf32> to tensor<32x2xf32>
%8 = tensor.empty() : tensor<32x2xf32>
%9 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_0 : tensor<32x2xf32>) outs(%8 : tensor<32x2xf32>) -> tensor<32x2xf32>
%extracted_slice_1 = tensor.extract_slice %4[%arg3, %arg1] [2, 128] [1, 1] : tensor<258x256xf32> to tensor<2x128xf32>
%10 = tensor.empty() : tensor<2x128xf32>
%11 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_1 : tensor<2x128xf32>) outs(%10 : tensor<2x128xf32>) -> tensor<2x128xf32>
%12 = linalg.matmul {lowering_config = #iree_gpu.lowering_config<{promote_operands = [0, 1], reduction = [0, 0, 2], thread = [1, 16, 0], workgroup = [32, 128, 1]}>} ins(%9, %11 : tensor<32x2xf32>, tensor<2x128xf32>) outs(%arg4 : tensor<32x128xf32>) -> tensor<32x128xf32>
scf.yield %12 : tensor<32x128xf32>
}
// Commit the finished 32x128 tile into this workgroup's slice of the output.
scf.forall.in_parallel {
tensor.parallel_insert_slice %7 into %arg2[%arg0, %arg1] [32, 128] [1, 1] : tensor<32x128xf32> into tensor<128x256xf32>
}
} {mapping = [#iree_codegen.workgroup_mapping<y>, #iree_codegen.workgroup_mapping<x>]}
// Store the fully assembled 128x256 result to the write-only binding.
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
// -----// IR Dump After GPUApplyTilingLevelPass (iree-codegen-gpu-apply-tiling-level) //----- //
// ---------------------------------------------------------------------------
// Auto-generated IR dump (after iree-codegen-gpu-apply-tiling-level) of the
// unaligned-K matmul dispatch: C[128x256] = A[128x258] * B[258x256] in f32,
// lowered via the LLVMGPUTileAndFuse pipeline with workgroup_size = [32, 8, 1]
// and subgroup size 32.
// NOTE(review): this is compiler output; hand edits are lost on regeneration.
// ---------------------------------------------------------------------------
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32() attributes {translation_info = #iree_codegen.translation_info<pipeline = LLVMGPUTileAndFuse workgroup_size = [32, 8, 1] subgroup_size = 32, {gpu_pipeline_options = #iree_gpu.pipeline_options<prefetch_shared_memory = false, no_reduce_shared_memory_bank_conflicts = true, use_igemm_convolution = false>}>} {
%c2 = arith.constant 2 : index
%c258 = arith.constant 258 : index
%c0 = arith.constant 0 : index
// Pipeline bindings: 0 = LHS A (read-only), 1 = RHS B (read-only),
// 2 = result C (write-only).
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
// Workgroup-level distribution: each workgroup owns a 32x128 tile of the
// 128x256 result (workgroup = [32, 128, 1] in the lowering config below).
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (128, 256) step (32, 128) shared_outs(%arg2 = %5) -> (tensor<128x256xf32>) {
%extracted_slice = tensor.extract_slice %arg2[%arg0, %arg1] [32, 128] [1, 1] : tensor<128x256xf32> to tensor<32x128xf32>
// Serial reduction loop over K = 258 in steps of 2 (reduction = [0, 0, 2]).
%7 = scf.for %arg3 = %c0 to %c258 step %c2 iter_args(%arg4 = %extracted_slice) -> (tensor<32x128xf32>) {
%8 = tensor.empty() : tensor<32x2xf32>
// Promotion copy of the 32x2 LHS tile (promote_operands = [0, 1] on the
// matmul below), distributed one 1x2 strip per thread. Presumably this
// buffer becomes shared memory after bufferization — still a tensor here.
%9 = scf.forall (%arg5, %arg6) = (0, 0) to (32, 2) step (1, 2) shared_outs(%arg7 = %8) -> (tensor<32x2xf32>) {
%13 = affine.apply affine_map<()[s0, s1] -> (s0 + s1)>()[%arg5, %arg0]
%14 = affine.apply affine_map<()[s0, s1] -> (s0 + s1)>()[%arg6, %arg3]
%extracted_slice_0 = tensor.extract_slice %3[%13, %14] [1, 2] [1, 1] : tensor<128x258xf32> to tensor<1x2xf32>
%extracted_slice_1 = tensor.extract_slice %arg7[%arg5, %arg6] [1, 2] [1, 1] : tensor<32x2xf32> to tensor<1x2xf32>
%15 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_0 : tensor<1x2xf32>) outs(%extracted_slice_1 : tensor<1x2xf32>) -> tensor<1x2xf32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %15 into %arg7[%arg5, %arg6] [1, 2] [1, 1] : tensor<1x2xf32> into tensor<32x2xf32>
}
} {mapping = [#gpu.thread<linear_dim_1>, #gpu.thread<linear_dim_0>]}
%10 = tensor.empty() : tensor<2x128xf32>
// Matching promotion copy of the 2x128 RHS tile, one element per thread.
%11 = scf.forall (%arg5, %arg6) in (2, 128) shared_outs(%arg7 = %10) -> (tensor<2x128xf32>) {
%13 = affine.apply affine_map<()[s0, s1] -> (s0 + s1)>()[%arg5, %arg3]
%14 = affine.apply affine_map<()[s0, s1] -> (s0 + s1)>()[%arg6, %arg1]
%extracted_slice_0 = tensor.extract_slice %4[%13, %14] [1, 1] [1, 1] : tensor<258x256xf32> to tensor<1x1xf32>
%extracted_slice_1 = tensor.extract_slice %arg7[%arg5, %arg6] [1, 1] [1, 1] : tensor<2x128xf32> to tensor<1x1xf32>
%15 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_0 : tensor<1x1xf32>) outs(%extracted_slice_1 : tensor<1x1xf32>) -> tensor<1x1xf32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %15 into %arg7[%arg5, %arg6] [1, 1] [1, 1] : tensor<1x1xf32> into tensor<2x128xf32>
}
} {mapping = [#gpu.thread<linear_dim_1>, #gpu.thread<linear_dim_0>]}
// Thread-level distribution of the matmul: each thread accumulates a 1x16
// slice of the 32x128 workgroup tile (thread = [1, 16, 0]).
%12 = scf.forall (%arg5, %arg6) = (0, 0) to (32, 128) step (1, 16) shared_outs(%arg7 = %arg4) -> (tensor<32x128xf32>) {
%extracted_slice_0 = tensor.extract_slice %9[%arg5, 0] [1, 2] [1, 1] : tensor<32x2xf32> to tensor<1x2xf32>
%extracted_slice_1 = tensor.extract_slice %11[0, %arg6] [2, 16] [1, 1] : tensor<2x128xf32> to tensor<2x16xf32>
%extracted_slice_2 = tensor.extract_slice %arg7[%arg5, %arg6] [1, 16] [1, 1] : tensor<32x128xf32> to tensor<1x16xf32>
%13 = linalg.matmul {lowering_config = #iree_gpu.lowering_config<{promote_operands = [0, 1], reduction = [0, 0, 2], thread = [1, 16, 0], workgroup = [32, 128, 1]}>} ins(%extracted_slice_0, %extracted_slice_1 : tensor<1x2xf32>, tensor<2x16xf32>) outs(%extracted_slice_2 : tensor<1x16xf32>) -> tensor<1x16xf32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %13 into %arg7[%arg5, %arg6] [1, 16] [1, 1] : tensor<1x16xf32> into tensor<32x128xf32>
}
} {mapping = [#gpu.thread<linear_dim_1>, #gpu.thread<linear_dim_0>]}
scf.yield %12 : tensor<32x128xf32>
}
scf.forall.in_parallel {
tensor.parallel_insert_slice %7 into %arg2[%arg0, %arg1] [32, 128] [1, 1] : tensor<32x128xf32> into tensor<128x256xf32>
}
} {mapping = [#iree_codegen.workgroup_mapping<y>, #iree_codegen.workgroup_mapping<x>]}
// Write the fully accumulated 128x256 result back to binding 2.
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
// -----// IR Dump After ConfigTrackingCanonicalizerPass (iree-codegen-config-tracking-canonicalize) //----- //
// ---------------------------------------------------------------------------
// Auto-generated IR dump (after iree-codegen-config-tracking-canonicalize) of
// the same unaligned-K matmul dispatch. Relative to the previous dump, the
// canonicalizer has folded the zero-offset column index of the LHS promotion
// copy: the slice is now taken at [%arg5, 0] / column %arg3 directly, so only
// one affine.apply remains in that loop.
// NOTE(review): this is compiler output; hand edits are lost on regeneration.
// ---------------------------------------------------------------------------
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32() attributes {translation_info = #iree_codegen.translation_info<pipeline = LLVMGPUTileAndFuse workgroup_size = [32, 8, 1] subgroup_size = 32, {gpu_pipeline_options = #iree_gpu.pipeline_options<prefetch_shared_memory = false, no_reduce_shared_memory_bank_conflicts = true, use_igemm_convolution = false>}>} {
%c2 = arith.constant 2 : index
%c258 = arith.constant 258 : index
%c0 = arith.constant 0 : index
// Bindings: 0 = LHS A (read-only), 1 = RHS B (read-only), 2 = C (write-only).
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
// Workgroup tiling: 32x128 result tiles.
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (128, 256) step (32, 128) shared_outs(%arg2 = %5) -> (tensor<128x256xf32>) {
%extracted_slice = tensor.extract_slice %arg2[%arg0, %arg1] [32, 128] [1, 1] : tensor<128x256xf32> to tensor<32x128xf32>
// Serial K loop, K = 258, step 2.
%7 = scf.for %arg3 = %c0 to %c258 step %c2 iter_args(%arg4 = %extracted_slice) -> (tensor<32x128xf32>) {
%8 = tensor.empty() : tensor<32x2xf32>
// LHS promotion copy (32x2 tile), 1x2 strip per thread.
%9 = scf.forall (%arg5, %arg6) = (0, 0) to (32, 2) step (1, 2) shared_outs(%arg7 = %8) -> (tensor<32x2xf32>) {
%13 = affine.apply affine_map<()[s0, s1] -> (s0 + s1)>()[%arg5, %arg0]
%extracted_slice_0 = tensor.extract_slice %3[%13, %arg3] [1, 2] [1, 1] : tensor<128x258xf32> to tensor<1x2xf32>
%extracted_slice_1 = tensor.extract_slice %arg7[%arg5, 0] [1, 2] [1, 1] : tensor<32x2xf32> to tensor<1x2xf32>
%14 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_0 : tensor<1x2xf32>) outs(%extracted_slice_1 : tensor<1x2xf32>) -> tensor<1x2xf32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %14 into %arg7[%arg5, 0] [1, 2] [1, 1] : tensor<1x2xf32> into tensor<32x2xf32>
}
} {mapping = [#gpu.thread<linear_dim_1>, #gpu.thread<linear_dim_0>]}
%10 = tensor.empty() : tensor<2x128xf32>
// RHS promotion copy (2x128 tile), one element per thread.
%11 = scf.forall (%arg5, %arg6) in (2, 128) shared_outs(%arg7 = %10) -> (tensor<2x128xf32>) {
%13 = affine.apply affine_map<()[s0, s1] -> (s0 + s1)>()[%arg5, %arg3]
%14 = affine.apply affine_map<()[s0, s1] -> (s0 + s1)>()[%arg6, %arg1]
%extracted_slice_0 = tensor.extract_slice %4[%13, %14] [1, 1] [1, 1] : tensor<258x256xf32> to tensor<1x1xf32>
%extracted_slice_1 = tensor.extract_slice %arg7[%arg5, %arg6] [1, 1] [1, 1] : tensor<2x128xf32> to tensor<1x1xf32>
%15 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_0 : tensor<1x1xf32>) outs(%extracted_slice_1 : tensor<1x1xf32>) -> tensor<1x1xf32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %15 into %arg7[%arg5, %arg6] [1, 1] [1, 1] : tensor<1x1xf32> into tensor<2x128xf32>
}
} {mapping = [#gpu.thread<linear_dim_1>, #gpu.thread<linear_dim_0>]}
// Thread-level matmul: 1x16 accumulator slice per thread.
%12 = scf.forall (%arg5, %arg6) = (0, 0) to (32, 128) step (1, 16) shared_outs(%arg7 = %arg4) -> (tensor<32x128xf32>) {
%extracted_slice_0 = tensor.extract_slice %9[%arg5, 0] [1, 2] [1, 1] : tensor<32x2xf32> to tensor<1x2xf32>
%extracted_slice_1 = tensor.extract_slice %11[0, %arg6] [2, 16] [1, 1] : tensor<2x128xf32> to tensor<2x16xf32>
%extracted_slice_2 = tensor.extract_slice %arg7[%arg5, %arg6] [1, 16] [1, 1] : tensor<32x128xf32> to tensor<1x16xf32>
%13 = linalg.matmul {lowering_config = #iree_gpu.lowering_config<{promote_operands = [0, 1], reduction = [0, 0, 2], thread = [1, 16, 0], workgroup = [32, 128, 1]}>} ins(%extracted_slice_0, %extracted_slice_1 : tensor<1x2xf32>, tensor<2x16xf32>) outs(%extracted_slice_2 : tensor<1x16xf32>) -> tensor<1x16xf32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %13 into %arg7[%arg5, %arg6] [1, 16] [1, 1] : tensor<1x16xf32> into tensor<32x128xf32>
}
} {mapping = [#gpu.thread<linear_dim_1>, #gpu.thread<linear_dim_0>]}
scf.yield %12 : tensor<32x128xf32>
}
scf.forall.in_parallel {
tensor.parallel_insert_slice %7 into %arg2[%arg0, %arg1] [32, 128] [1, 1] : tensor<32x128xf32> into tensor<128x256xf32>
}
} {mapping = [#iree_codegen.workgroup_mapping<y>, #iree_codegen.workgroup_mapping<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
// -----// IR Dump After CSE (cse) //----- //
// ---------------------------------------------------------------------------
// Auto-generated IR dump (after cse) of the unaligned-K matmul dispatch.
// The IR is unchanged relative to the preceding canonicalize dump: CSE found
// no duplicate subexpressions to eliminate here. See the earlier annotated
// dumps in this file for a walkthrough of the tiling structure.
// NOTE(review): this is compiler output; hand edits are lost on regeneration.
// ---------------------------------------------------------------------------
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32() attributes {translation_info = #iree_codegen.translation_info<pipeline = LLVMGPUTileAndFuse workgroup_size = [32, 8, 1] subgroup_size = 32, {gpu_pipeline_options = #iree_gpu.pipeline_options<prefetch_shared_memory = false, no_reduce_shared_memory_bank_conflicts = true, use_igemm_convolution = false>}>} {
%c2 = arith.constant 2 : index
%c258 = arith.constant 258 : index
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (128, 256) step (32, 128) shared_outs(%arg2 = %5) -> (tensor<128x256xf32>) {
%extracted_slice = tensor.extract_slice %arg2[%arg0, %arg1] [32, 128] [1, 1] : tensor<128x256xf32> to tensor<32x128xf32>
%7 = scf.for %arg3 = %c0 to %c258 step %c2 iter_args(%arg4 = %extracted_slice) -> (tensor<32x128xf32>) {
%8 = tensor.empty() : tensor<32x2xf32>
%9 = scf.forall (%arg5, %arg6) = (0, 0) to (32, 2) step (1, 2) shared_outs(%arg7 = %8) -> (tensor<32x2xf32>) {
%13 = affine.apply affine_map<()[s0, s1] -> (s0 + s1)>()[%arg5, %arg0]
%extracted_slice_0 = tensor.extract_slice %3[%13, %arg3] [1, 2] [1, 1] : tensor<128x258xf32> to tensor<1x2xf32>
%extracted_slice_1 = tensor.extract_slice %arg7[%arg5, 0] [1, 2] [1, 1] : tensor<32x2xf32> to tensor<1x2xf32>
%14 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_0 : tensor<1x2xf32>) outs(%extracted_slice_1 : tensor<1x2xf32>) -> tensor<1x2xf32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %14 into %arg7[%arg5, 0] [1, 2] [1, 1] : tensor<1x2xf32> into tensor<32x2xf32>
}
} {mapping = [#gpu.thread<linear_dim_1>, #gpu.thread<linear_dim_0>]}
%10 = tensor.empty() : tensor<2x128xf32>
%11 = scf.forall (%arg5, %arg6) in (2, 128) shared_outs(%arg7 = %10) -> (tensor<2x128xf32>) {
%13 = affine.apply affine_map<()[s0, s1] -> (s0 + s1)>()[%arg5, %arg3]
%14 = affine.apply affine_map<()[s0, s1] -> (s0 + s1)>()[%arg6, %arg1]
%extracted_slice_0 = tensor.extract_slice %4[%13, %14] [1, 1] [1, 1] : tensor<258x256xf32> to tensor<1x1xf32>
%extracted_slice_1 = tensor.extract_slice %arg7[%arg5, %arg6] [1, 1] [1, 1] : tensor<2x128xf32> to tensor<1x1xf32>
%15 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_0 : tensor<1x1xf32>) outs(%extracted_slice_1 : tensor<1x1xf32>) -> tensor<1x1xf32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %15 into %arg7[%arg5, %arg6] [1, 1] [1, 1] : tensor<1x1xf32> into tensor<2x128xf32>
}
} {mapping = [#gpu.thread<linear_dim_1>, #gpu.thread<linear_dim_0>]}
%12 = scf.forall (%arg5, %arg6) = (0, 0) to (32, 128) step (1, 16) shared_outs(%arg7 = %arg4) -> (tensor<32x128xf32>) {
%extracted_slice_0 = tensor.extract_slice %9[%arg5, 0] [1, 2] [1, 1] : tensor<32x2xf32> to tensor<1x2xf32>
%extracted_slice_1 = tensor.extract_slice %11[0, %arg6] [2, 16] [1, 1] : tensor<2x128xf32> to tensor<2x16xf32>
%extracted_slice_2 = tensor.extract_slice %arg7[%arg5, %arg6] [1, 16] [1, 1] : tensor<32x128xf32> to tensor<1x16xf32>
%13 = linalg.matmul {lowering_config = #iree_gpu.lowering_config<{promote_operands = [0, 1], reduction = [0, 0, 2], thread = [1, 16, 0], workgroup = [32, 128, 1]}>} ins(%extracted_slice_0, %extracted_slice_1 : tensor<1x2xf32>, tensor<2x16xf32>) outs(%extracted_slice_2 : tensor<1x16xf32>) -> tensor<1x16xf32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %13 into %arg7[%arg5, %arg6] [1, 16] [1, 1] : tensor<1x16xf32> into tensor<32x128xf32>
}
} {mapping = [#gpu.thread<linear_dim_1>, #gpu.thread<linear_dim_0>]}
scf.yield %12 : tensor<32x128xf32>
}
scf.forall.in_parallel {
tensor.parallel_insert_slice %7 into %arg2[%arg0, %arg1] [32, 128] [1, 1] : tensor<32x128xf32> into tensor<128x256xf32>
}
} {mapping = [#iree_codegen.workgroup_mapping<y>, #iree_codegen.workgroup_mapping<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
// -----// IR Dump After GPUApplyTilingLevelPass (iree-codegen-gpu-apply-tiling-level) //----- //
// ---------------------------------------------------------------------------
// Auto-generated IR dump (after iree-codegen-gpu-apply-tiling-level, second
// invocation) of the unaligned-K matmul dispatch. The pass made no further
// changes: all tiling levels were already materialized, so this snapshot is
// identical to the preceding CSE dump.
// NOTE(review): this is compiler output; hand edits are lost on regeneration.
// ---------------------------------------------------------------------------
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32() attributes {translation_info = #iree_codegen.translation_info<pipeline = LLVMGPUTileAndFuse workgroup_size = [32, 8, 1] subgroup_size = 32, {gpu_pipeline_options = #iree_gpu.pipeline_options<prefetch_shared_memory = false, no_reduce_shared_memory_bank_conflicts = true, use_igemm_convolution = false>}>} {
%c2 = arith.constant 2 : index
%c258 = arith.constant 258 : index
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (128, 256) step (32, 128) shared_outs(%arg2 = %5) -> (tensor<128x256xf32>) {
%extracted_slice = tensor.extract_slice %arg2[%arg0, %arg1] [32, 128] [1, 1] : tensor<128x256xf32> to tensor<32x128xf32>
%7 = scf.for %arg3 = %c0 to %c258 step %c2 iter_args(%arg4 = %extracted_slice) -> (tensor<32x128xf32>) {
%8 = tensor.empty() : tensor<32x2xf32>
%9 = scf.forall (%arg5, %arg6) = (0, 0) to (32, 2) step (1, 2) shared_outs(%arg7 = %8) -> (tensor<32x2xf32>) {
%13 = affine.apply affine_map<()[s0, s1] -> (s0 + s1)>()[%arg5, %arg0]
%extracted_slice_0 = tensor.extract_slice %3[%13, %arg3] [1, 2] [1, 1] : tensor<128x258xf32> to tensor<1x2xf32>
%extracted_slice_1 = tensor.extract_slice %arg7[%arg5, 0] [1, 2] [1, 1] : tensor<32x2xf32> to tensor<1x2xf32>
%14 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_0 : tensor<1x2xf32>) outs(%extracted_slice_1 : tensor<1x2xf32>) -> tensor<1x2xf32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %14 into %arg7[%arg5, 0] [1, 2] [1, 1] : tensor<1x2xf32> into tensor<32x2xf32>
}
} {mapping = [#gpu.thread<linear_dim_1>, #gpu.thread<linear_dim_0>]}
%10 = tensor.empty() : tensor<2x128xf32>
%11 = scf.forall (%arg5, %arg6) in (2, 128) shared_outs(%arg7 = %10) -> (tensor<2x128xf32>) {
%13 = affine.apply affine_map<()[s0, s1] -> (s0 + s1)>()[%arg5, %arg3]
%14 = affine.apply affine_map<()[s0, s1] -> (s0 + s1)>()[%arg6, %arg1]
%extracted_slice_0 = tensor.extract_slice %4[%13, %14] [1, 1] [1, 1] : tensor<258x256xf32> to tensor<1x1xf32>
%extracted_slice_1 = tensor.extract_slice %arg7[%arg5, %arg6] [1, 1] [1, 1] : tensor<2x128xf32> to tensor<1x1xf32>
%15 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_0 : tensor<1x1xf32>) outs(%extracted_slice_1 : tensor<1x1xf32>) -> tensor<1x1xf32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %15 into %arg7[%arg5, %arg6] [1, 1] [1, 1] : tensor<1x1xf32> into tensor<2x128xf32>
}
} {mapping = [#gpu.thread<linear_dim_1>, #gpu.thread<linear_dim_0>]}
%12 = scf.forall (%arg5, %arg6) = (0, 0) to (32, 128) step (1, 16) shared_outs(%arg7 = %arg4) -> (tensor<32x128xf32>) {
%extracted_slice_0 = tensor.extract_slice %9[%arg5, 0] [1, 2] [1, 1] : tensor<32x2xf32> to tensor<1x2xf32>
%extracted_slice_1 = tensor.extract_slice %11[0, %arg6] [2, 16] [1, 1] : tensor<2x128xf32> to tensor<2x16xf32>
%extracted_slice_2 = tensor.extract_slice %arg7[%arg5, %arg6] [1, 16] [1, 1] : tensor<32x128xf32> to tensor<1x16xf32>
%13 = linalg.matmul {lowering_config = #iree_gpu.lowering_config<{promote_operands = [0, 1], reduction = [0, 0, 2], thread = [1, 16, 0], workgroup = [32, 128, 1]}>} ins(%extracted_slice_0, %extracted_slice_1 : tensor<1x2xf32>, tensor<2x16xf32>) outs(%extracted_slice_2 : tensor<1x16xf32>) -> tensor<1x16xf32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %13 into %arg7[%arg5, %arg6] [1, 16] [1, 1] : tensor<1x16xf32> into tensor<32x128xf32>
}
} {mapping = [#gpu.thread<linear_dim_1>, #gpu.thread<linear_dim_0>]}
scf.yield %12 : tensor<32x128xf32>
}
scf.forall.in_parallel {
tensor.parallel_insert_slice %7 into %arg2[%arg0, %arg1] [32, 128] [1, 1] : tensor<32x128xf32> into tensor<128x256xf32>
}
} {mapping = [#iree_codegen.workgroup_mapping<y>, #iree_codegen.workgroup_mapping<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
// -----// IR Dump After DistributeMmaToLanesPass (iree-gpu-distribute-mma-to-lanes) //----- //
// ---------------------------------------------------------------------------
// Auto-generated IR dump (after iree-gpu-distribute-mma-to-lanes) of the
// unaligned-K matmul dispatch. The pass left the IR unchanged: the lowering
// config uses plain thread tiling (thread = [1, 16, 0]) rather than an MMA
// intrinsic schedule, so there are no iree_gpu multi-MMA ops to distribute.
// NOTE(review): this is compiler output; hand edits are lost on regeneration.
// ---------------------------------------------------------------------------
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32() attributes {translation_info = #iree_codegen.translation_info<pipeline = LLVMGPUTileAndFuse workgroup_size = [32, 8, 1] subgroup_size = 32, {gpu_pipeline_options = #iree_gpu.pipeline_options<prefetch_shared_memory = false, no_reduce_shared_memory_bank_conflicts = true, use_igemm_convolution = false>}>} {
%c2 = arith.constant 2 : index
%c258 = arith.constant 258 : index
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (128, 256) step (32, 128) shared_outs(%arg2 = %5) -> (tensor<128x256xf32>) {
%extracted_slice = tensor.extract_slice %arg2[%arg0, %arg1] [32, 128] [1, 1] : tensor<128x256xf32> to tensor<32x128xf32>
%7 = scf.for %arg3 = %c0 to %c258 step %c2 iter_args(%arg4 = %extracted_slice) -> (tensor<32x128xf32>) {
%8 = tensor.empty() : tensor<32x2xf32>
%9 = scf.forall (%arg5, %arg6) = (0, 0) to (32, 2) step (1, 2) shared_outs(%arg7 = %8) -> (tensor<32x2xf32>) {
%13 = affine.apply affine_map<()[s0, s1] -> (s0 + s1)>()[%arg5, %arg0]
%extracted_slice_0 = tensor.extract_slice %3[%13, %arg3] [1, 2] [1, 1] : tensor<128x258xf32> to tensor<1x2xf32>
%extracted_slice_1 = tensor.extract_slice %arg7[%arg5, 0] [1, 2] [1, 1] : tensor<32x2xf32> to tensor<1x2xf32>
%14 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_0 : tensor<1x2xf32>) outs(%extracted_slice_1 : tensor<1x2xf32>) -> tensor<1x2xf32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %14 into %arg7[%arg5, 0] [1, 2] [1, 1] : tensor<1x2xf32> into tensor<32x2xf32>
}
} {mapping = [#gpu.thread<linear_dim_1>, #gpu.thread<linear_dim_0>]}
%10 = tensor.empty() : tensor<2x128xf32>
%11 = scf.forall (%arg5, %arg6) in (2, 128) shared_outs(%arg7 = %10) -> (tensor<2x128xf32>) {
%13 = affine.apply affine_map<()[s0, s1] -> (s0 + s1)>()[%arg5, %arg3]
%14 = affine.apply affine_map<()[s0, s1] -> (s0 + s1)>()[%arg6, %arg1]
%extracted_slice_0 = tensor.extract_slice %4[%13, %14] [1, 1] [1, 1] : tensor<258x256xf32> to tensor<1x1xf32>
%extracted_slice_1 = tensor.extract_slice %arg7[%arg5, %arg6] [1, 1] [1, 1] : tensor<2x128xf32> to tensor<1x1xf32>
%15 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_0 : tensor<1x1xf32>) outs(%extracted_slice_1 : tensor<1x1xf32>) -> tensor<1x1xf32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %15 into %arg7[%arg5, %arg6] [1, 1] [1, 1] : tensor<1x1xf32> into tensor<2x128xf32>
}
} {mapping = [#gpu.thread<linear_dim_1>, #gpu.thread<linear_dim_0>]}
%12 = scf.forall (%arg5, %arg6) = (0, 0) to (32, 128) step (1, 16) shared_outs(%arg7 = %arg4) -> (tensor<32x128xf32>) {
%extracted_slice_0 = tensor.extract_slice %9[%arg5, 0] [1, 2] [1, 1] : tensor<32x2xf32> to tensor<1x2xf32>
%extracted_slice_1 = tensor.extract_slice %11[0, %arg6] [2, 16] [1, 1] : tensor<2x128xf32> to tensor<2x16xf32>
%extracted_slice_2 = tensor.extract_slice %arg7[%arg5, %arg6] [1, 16] [1, 1] : tensor<32x128xf32> to tensor<1x16xf32>
%13 = linalg.matmul {lowering_config = #iree_gpu.lowering_config<{promote_operands = [0, 1], reduction = [0, 0, 2], thread = [1, 16, 0], workgroup = [32, 128, 1]}>} ins(%extracted_slice_0, %extracted_slice_1 : tensor<1x2xf32>, tensor<2x16xf32>) outs(%extracted_slice_2 : tensor<1x16xf32>) -> tensor<1x16xf32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %13 into %arg7[%arg5, %arg6] [1, 16] [1, 1] : tensor<1x16xf32> into tensor<32x128xf32>
}
} {mapping = [#gpu.thread<linear_dim_1>, #gpu.thread<linear_dim_0>]}
scf.yield %12 : tensor<32x128xf32>
}
scf.forall.in_parallel {
tensor.parallel_insert_slice %7 into %arg2[%arg0, %arg1] [32, 128] [1, 1] : tensor<32x128xf32> into tensor<128x256xf32>
}
} {mapping = [#iree_codegen.workgroup_mapping<y>, #iree_codegen.workgroup_mapping<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
// -----// IR Dump After NormalizeLoopBoundsPass (iree-codegen-normalize-loop-bounds) //----- //
// ---------------------------------------------------------------------------
// Auto-generated IR dump (after iree-codegen-normalize-loop-bounds) of the
// unaligned-K matmul dispatch. All scf.forall loops now use unit steps with
// normalized iteration spaces; the original strided indices are recovered via
// explicit affine.apply multiplications (e.g. workgroup ids * 32 / * 128,
// thread column * 16).
// NOTE(review): this is compiler output; hand edits are lost on regeneration.
// ---------------------------------------------------------------------------
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32() attributes {translation_info = #iree_codegen.translation_info<pipeline = LLVMGPUTileAndFuse workgroup_size = [32, 8, 1] subgroup_size = 32, {gpu_pipeline_options = #iree_gpu.pipeline_options<prefetch_shared_memory = false, no_reduce_shared_memory_bank_conflicts = true, use_igemm_convolution = false>}>} {
%c2 = arith.constant 2 : index
%c258 = arith.constant 258 : index
%c0 = arith.constant 0 : index
// Bindings: 0 = LHS A (read-only), 1 = RHS B (read-only), 2 = C (write-only).
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
// Normalized workgroup grid: 4x2 workgroups, each mapped back to a 32x128
// result tile via the * 32 / * 128 affine maps below.
%6 = scf.forall (%arg0, %arg1) in (4, 2) shared_outs(%arg2 = %5) -> (tensor<128x256xf32>) {
%7 = affine.apply affine_map<(d0) -> (d0 * 128)>(%arg1)
%8 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg0)
%extracted_slice = tensor.extract_slice %arg2[%8, %7] [32, 128] [1, 1] : tensor<128x256xf32> to tensor<32x128xf32>
// Serial K loop, K = 258, step 2 (still strided; only foralls normalize).
%9 = scf.for %arg3 = %c0 to %c258 step %c2 iter_args(%arg4 = %extracted_slice) -> (tensor<32x128xf32>) {
%10 = tensor.empty() : tensor<32x2xf32>
// LHS promotion copy over a normalized 32x1 thread grid; each thread
// copies one 1x2 strip of the 32x2 tile.
%11 = scf.forall (%arg5, %arg6) in (32, 1) shared_outs(%arg7 = %10) -> (tensor<32x2xf32>) {
%15 = affine.apply affine_map<(d0) -> (d0 * 2)>(%arg6)
%16 = affine.apply affine_map<(d0) -> (d0)>(%arg5)
%17 = affine.apply affine_map<()[s0, s1] -> (s0 + s1)>()[%16, %8]
%extracted_slice_0 = tensor.extract_slice %3[%17, %arg3] [1, 2] [1, 1] : tensor<128x258xf32> to tensor<1x2xf32>
%extracted_slice_1 = tensor.extract_slice %arg7[%16, 0] [1, 2] [1, 1] : tensor<32x2xf32> to tensor<1x2xf32>
%18 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_0 : tensor<1x2xf32>) outs(%extracted_slice_1 : tensor<1x2xf32>) -> tensor<1x2xf32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %18 into %arg7[%16, 0] [1, 2] [1, 1] : tensor<1x2xf32> into tensor<32x2xf32>
}
} {mapping = [#gpu.thread<linear_dim_1>, #gpu.thread<linear_dim_0>]}
%12 = tensor.empty() : tensor<2x128xf32>
// RHS promotion copy (2x128 tile), one element per thread.
%13 = scf.forall (%arg5, %arg6) in (2, 128) shared_outs(%arg7 = %12) -> (tensor<2x128xf32>) {
%15 = affine.apply affine_map<()[s0, s1] -> (s0 + s1)>()[%arg5, %arg3]
%16 = affine.apply affine_map<()[s0, s1] -> (s0 + s1)>()[%arg6, %7]
%extracted_slice_0 = tensor.extract_slice %4[%15, %16] [1, 1] [1, 1] : tensor<258x256xf32> to tensor<1x1xf32>
%extracted_slice_1 = tensor.extract_slice %arg7[%arg5, %arg6] [1, 1] [1, 1] : tensor<2x128xf32> to tensor<1x1xf32>
%17 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_0 : tensor<1x1xf32>) outs(%extracted_slice_1 : tensor<1x1xf32>) -> tensor<1x1xf32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %17 into %arg7[%arg5, %arg6] [1, 1] [1, 1] : tensor<1x1xf32> into tensor<2x128xf32>
}
} {mapping = [#gpu.thread<linear_dim_1>, #gpu.thread<linear_dim_0>]}
// Thread-level matmul over a normalized 32x8 thread grid; column index is
// scaled by 16 to address each thread's 1x16 accumulator slice.
%14 = scf.forall (%arg5, %arg6) in (32, 8) shared_outs(%arg7 = %arg4) -> (tensor<32x128xf32>) {
%15 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg6)
%16 = affine.apply affine_map<(d0) -> (d0)>(%arg5)
%extracted_slice_0 = tensor.extract_slice %11[%16, 0] [1, 2] [1, 1] : tensor<32x2xf32> to tensor<1x2xf32>
%extracted_slice_1 = tensor.extract_slice %13[0, %15] [2, 16] [1, 1] : tensor<2x128xf32> to tensor<2x16xf32>
%extracted_slice_2 = tensor.extract_slice %arg7[%16, %15] [1, 16] [1, 1] : tensor<32x128xf32> to tensor<1x16xf32>
%17 = linalg.matmul {lowering_config = #iree_gpu.lowering_config<{promote_operands = [0, 1], reduction = [0, 0, 2], thread = [1, 16, 0], workgroup = [32, 128, 1]}>} ins(%extracted_slice_0, %extracted_slice_1 : tensor<1x2xf32>, tensor<2x16xf32>) outs(%extracted_slice_2 : tensor<1x16xf32>) -> tensor<1x16xf32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %17 into %arg7[%16, %15] [1, 16] [1, 1] : tensor<1x16xf32> into tensor<32x128xf32>
}
} {mapping = [#gpu.thread<linear_dim_1>, #gpu.thread<linear_dim_0>]}
scf.yield %14 : tensor<32x128xf32>
}
scf.forall.in_parallel {
tensor.parallel_insert_slice %9 into %arg2[%8, %7] [32, 128] [1, 1] : tensor<32x128xf32> into tensor<128x256xf32>
}
} {mapping = [#iree_codegen.workgroup_mapping<y>, #iree_codegen.workgroup_mapping<x>]}
// Write the fully accumulated 128x256 result back to binding 2.
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
// -----// IR Dump After ConfigTrackingCanonicalizerPass (iree-codegen-config-tracking-canonicalize) //----- //
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32() attributes {translation_info = #iree_codegen.translation_info<pipeline = LLVMGPUTileAndFuse workgroup_size = [32, 8, 1] subgroup_size = 32, {gpu_pipeline_options = #iree_gpu.pipeline_options<prefetch_shared_memory = false, no_reduce_shared_memory_bank_conflicts = true, use_igemm_convolution = false>}>} {
%c2 = arith.constant 2 : index
%c258 = arith.constant 258 : index
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = scf.forall (%arg0, %arg1) in (4, 2) shared_outs(%arg2 = %5) -> (tensor<128x256xf32>) {
%7 = affine.apply affine_map<(d0) -> (d0 * 128)>(%arg1)
%8 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg0)
%extracted_slice = tensor.extract_slice %arg2[%8, %7] [32, 128] [1, 1] : tensor<128x256xf32> to tensor<32x128xf32>
%9 = scf.for %arg3 = %c0 to %c258 step %c2 iter_args(%arg4 = %extracted_slice) -> (tensor<32x128xf32>) {
%10 = tensor.empty() : tensor<32x2xf32>
%11 = scf.forall (%arg5, %arg6) in (32, 1) shared_outs(%arg7 = %10) -> (tensor<32x2xf32>) {
%15 = affine.apply affine_map<(d0)[s0] -> (d0 * 32 + s0)>(%arg0)[%arg5]
%extracted_slice_0 = tensor.extract_slice %3[%15, %arg3] [1, 2] [1, 1] : tensor<128x258xf32> to tensor<1x2xf32>
%extracted_slice_1 = tensor.extract_slice %arg7[%arg5, 0] [1, 2] [1, 1] : tensor<32x2xf32> to tensor<1x2xf32>
%16 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_0 : tensor<1x2xf32>) outs(%extracted_slice_1 : tensor<1x2xf32>) -> tensor<1x2xf32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %16 into %arg7[%arg5, 0] [1, 2] [1, 1] : tensor<1x2xf32> into tensor<32x2xf32>
}
} {mapping = [#gpu.thread<linear_dim_1>, #gpu.thread<linear_dim_0>]}
%12 = tensor.empty() : tensor<2x128xf32>
%13 = scf.forall (%arg5, %arg6) in (2, 128) shared_outs(%arg7 = %12) -> (tensor<2x128xf32>) {
%15 = affine.apply affine_map<()[s0, s1] -> (s0 + s1)>()[%arg5, %arg3]
%16 = affine.apply affine_map<(d0)[s0] -> (d0 * 128 + s0)>(%arg1)[%arg6]
%extracted_slice_0 = tensor.extract_slice %4[%15, %16] [1, 1] [1, 1] : tensor<258x256xf32> to tensor<1x1xf32>
%extracted_slice_1 = tensor.extract_slice %arg7[%arg5, %arg6] [1, 1] [1, 1] : tensor<2x128xf32> to tensor<1x1xf32>
%17 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_0 : tensor<1x1xf32>) outs(%extracted_slice_1 : tensor<1x1xf32>) -> tensor<1x1xf32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %17 into %arg7[%arg5, %arg6] [1, 1] [1, 1] : tensor<1x1xf32> into tensor<2x128xf32>
}
} {mapping = [#gpu.thread<linear_dim_1>, #gpu.thread<linear_dim_0>]}
%14 = scf.forall (%arg5, %arg6) in (32, 8) shared_outs(%arg7 = %arg4) -> (tensor<32x128xf32>) {
%15 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg6)
%extracted_slice_0 = tensor.extract_slice %11[%arg5, 0] [1, 2] [1, 1] : tensor<32x2xf32> to tensor<1x2xf32>
%extracted_slice_1 = tensor.extract_slice %13[0, %15] [2, 16] [1, 1] : tensor<2x128xf32> to tensor<2x16xf32>
%extracted_slice_2 = tensor.extract_slice %arg7[%arg5, %15] [1, 16] [1, 1] : tensor<32x128xf32> to tensor<1x16xf32>
%16 = linalg.matmul {lowering_config = #iree_gpu.lowering_config<{promote_operands = [0, 1], reduction = [0, 0, 2], thread = [1, 16, 0], workgroup = [32, 128, 1]}>} ins(%extracted_slice_0, %extracted_slice_1 : tensor<1x2xf32>, tensor<2x16xf32>) outs(%extracted_slice_2 : tensor<1x16xf32>) -> tensor<1x16xf32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %16 into %arg7[%arg5, %15] [1, 16] [1, 1] : tensor<1x16xf32> into tensor<32x128xf32>
}
} {mapping = [#gpu.thread<linear_dim_1>, #gpu.thread<linear_dim_0>]}
scf.yield %14 : tensor<32x128xf32>
}
scf.forall.in_parallel {
tensor.parallel_insert_slice %9 into %arg2[%8, %7] [32, 128] [1, 1] : tensor<32x128xf32> into tensor<128x256xf32>
}
} {mapping = [#iree_codegen.workgroup_mapping<y>, #iree_codegen.workgroup_mapping<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
// -----// IR Dump After CSE (cse) //----- //
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32() attributes {translation_info = #iree_codegen.translation_info<pipeline = LLVMGPUTileAndFuse workgroup_size = [32, 8, 1] subgroup_size = 32, {gpu_pipeline_options = #iree_gpu.pipeline_options<prefetch_shared_memory = false, no_reduce_shared_memory_bank_conflicts = true, use_igemm_convolution = false>}>} {
%c2 = arith.constant 2 : index
%c258 = arith.constant 258 : index
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = scf.forall (%arg0, %arg1) in (4, 2) shared_outs(%arg2 = %5) -> (tensor<128x256xf32>) {
%7 = affine.apply affine_map<(d0) -> (d0 * 128)>(%arg1)
%8 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg0)
%extracted_slice = tensor.extract_slice %arg2[%8, %7] [32, 128] [1, 1] : tensor<128x256xf32> to tensor<32x128xf32>
%9 = scf.for %arg3 = %c0 to %c258 step %c2 iter_args(%arg4 = %extracted_slice) -> (tensor<32x128xf32>) {
%10 = tensor.empty() : tensor<32x2xf32>
%11 = scf.forall (%arg5, %arg6) in (32, 1) shared_outs(%arg7 = %10) -> (tensor<32x2xf32>) {
%15 = affine.apply affine_map<(d0)[s0] -> (d0 * 32 + s0)>(%arg0)[%arg5]
%extracted_slice_0 = tensor.extract_slice %3[%15, %arg3] [1, 2] [1, 1] : tensor<128x258xf32> to tensor<1x2xf32>
%extracted_slice_1 = tensor.extract_slice %arg7[%arg5, 0] [1, 2] [1, 1] : tensor<32x2xf32> to tensor<1x2xf32>
%16 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_0 : tensor<1x2xf32>) outs(%extracted_slice_1 : tensor<1x2xf32>) -> tensor<1x2xf32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %16 into %arg7[%arg5, 0] [1, 2] [1, 1] : tensor<1x2xf32> into tensor<32x2xf32>
}
} {mapping = [#gpu.thread<linear_dim_1>, #gpu.thread<linear_dim_0>]}
%12 = tensor.empty() : tensor<2x128xf32>
%13 = scf.forall (%arg5, %arg6) in (2, 128) shared_outs(%arg7 = %12) -> (tensor<2x128xf32>) {
%15 = affine.apply affine_map<()[s0, s1] -> (s0 + s1)>()[%arg5, %arg3]
%16 = affine.apply affine_map<(d0)[s0] -> (d0 * 128 + s0)>(%arg1)[%arg6]
%extracted_slice_0 = tensor.extract_slice %4[%15, %16] [1, 1] [1, 1] : tensor<258x256xf32> to tensor<1x1xf32>
%extracted_slice_1 = tensor.extract_slice %arg7[%arg5, %arg6] [1, 1] [1, 1] : tensor<2x128xf32> to tensor<1x1xf32>
%17 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_0 : tensor<1x1xf32>) outs(%extracted_slice_1 : tensor<1x1xf32>) -> tensor<1x1xf32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %17 into %arg7[%arg5, %arg6] [1, 1] [1, 1] : tensor<1x1xf32> into tensor<2x128xf32>
}
} {mapping = [#gpu.thread<linear_dim_1>, #gpu.thread<linear_dim_0>]}
%14 = scf.forall (%arg5, %arg6) in (32, 8) shared_outs(%arg7 = %arg4) -> (tensor<32x128xf32>) {
%15 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg6)
%extracted_slice_0 = tensor.extract_slice %11[%arg5, 0] [1, 2] [1, 1] : tensor<32x2xf32> to tensor<1x2xf32>
%extracted_slice_1 = tensor.extract_slice %13[0, %15] [2, 16] [1, 1] : tensor<2x128xf32> to tensor<2x16xf32>
%extracted_slice_2 = tensor.extract_slice %arg7[%arg5, %15] [1, 16] [1, 1] : tensor<32x128xf32> to tensor<1x16xf32>
%16 = linalg.matmul {lowering_config = #iree_gpu.lowering_config<{promote_operands = [0, 1], reduction = [0, 0, 2], thread = [1, 16, 0], workgroup = [32, 128, 1]}>} ins(%extracted_slice_0, %extracted_slice_1 : tensor<1x2xf32>, tensor<2x16xf32>) outs(%extracted_slice_2 : tensor<1x16xf32>) -> tensor<1x16xf32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %16 into %arg7[%arg5, %15] [1, 16] [1, 1] : tensor<1x16xf32> into tensor<32x128xf32>
}
} {mapping = [#gpu.thread<linear_dim_1>, #gpu.thread<linear_dim_0>]}
scf.yield %14 : tensor<32x128xf32>
}
scf.forall.in_parallel {
tensor.parallel_insert_slice %9 into %arg2[%8, %7] [32, 128] [1, 1] : tensor<32x128xf32> into tensor<128x256xf32>
}
} {mapping = [#iree_codegen.workgroup_mapping<y>, #iree_codegen.workgroup_mapping<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
// -----// IR Dump After IREELoopInvariantCodeMotionPass (iree-loop-invariant-code-motion) //----- //
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32() attributes {translation_info = #iree_codegen.translation_info<pipeline = LLVMGPUTileAndFuse workgroup_size = [32, 8, 1] subgroup_size = 32, {gpu_pipeline_options = #iree_gpu.pipeline_options<prefetch_shared_memory = false, no_reduce_shared_memory_bank_conflicts = true, use_igemm_convolution = false>}>} {
%c2 = arith.constant 2 : index
%c258 = arith.constant 258 : index
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = scf.forall (%arg0, %arg1) in (4, 2) shared_outs(%arg2 = %5) -> (tensor<128x256xf32>) {
%7 = affine.apply affine_map<(d0) -> (d0 * 128)>(%arg1)
%8 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg0)
%extracted_slice = tensor.extract_slice %arg2[%8, %7] [32, 128] [1, 1] : tensor<128x256xf32> to tensor<32x128xf32>
%9 = tensor.empty() : tensor<32x2xf32>
%10 = tensor.empty() : tensor<2x128xf32>
%11 = scf.for %arg3 = %c0 to %c258 step %c2 iter_args(%arg4 = %extracted_slice) -> (tensor<32x128xf32>) {
%12 = scf.forall (%arg5, %arg6) in (32, 1) shared_outs(%arg7 = %9) -> (tensor<32x2xf32>) {
%15 = affine.apply affine_map<(d0)[s0] -> (d0 * 32 + s0)>(%arg0)[%arg5]
%extracted_slice_0 = tensor.extract_slice %3[%15, %arg3] [1, 2] [1, 1] : tensor<128x258xf32> to tensor<1x2xf32>
%extracted_slice_1 = tensor.extract_slice %arg7[%arg5, 0] [1, 2] [1, 1] : tensor<32x2xf32> to tensor<1x2xf32>
%16 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_0 : tensor<1x2xf32>) outs(%extracted_slice_1 : tensor<1x2xf32>) -> tensor<1x2xf32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %16 into %arg7[%arg5, 0] [1, 2] [1, 1] : tensor<1x2xf32> into tensor<32x2xf32>
}
} {mapping = [#gpu.thread<linear_dim_1>, #gpu.thread<linear_dim_0>]}
%13 = scf.forall (%arg5, %arg6) in (2, 128) shared_outs(%arg7 = %10) -> (tensor<2x128xf32>) {
%15 = affine.apply affine_map<()[s0, s1] -> (s0 + s1)>()[%arg5, %arg3]
%16 = affine.apply affine_map<(d0)[s0] -> (d0 * 128 + s0)>(%arg1)[%arg6]
%extracted_slice_0 = tensor.extract_slice %4[%15, %16] [1, 1] [1, 1] : tensor<258x256xf32> to tensor<1x1xf32>
%extracted_slice_1 = tensor.extract_slice %arg7[%arg5, %arg6] [1, 1] [1, 1] : tensor<2x128xf32> to tensor<1x1xf32>
%17 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_0 : tensor<1x1xf32>) outs(%extracted_slice_1 : tensor<1x1xf32>) -> tensor<1x1xf32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %17 into %arg7[%arg5, %arg6] [1, 1] [1, 1] : tensor<1x1xf32> into tensor<2x128xf32>
}
} {mapping = [#gpu.thread<linear_dim_1>, #gpu.thread<linear_dim_0>]}
%14 = scf.forall (%arg5, %arg6) in (32, 8) shared_outs(%arg7 = %arg4) -> (tensor<32x128xf32>) {
%15 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg6)
%extracted_slice_0 = tensor.extract_slice %12[%arg5, 0] [1, 2] [1, 1] : tensor<32x2xf32> to tensor<1x2xf32>
%extracted_slice_1 = tensor.extract_slice %13[0, %15] [2, 16] [1, 1] : tensor<2x128xf32> to tensor<2x16xf32>
%extracted_slice_2 = tensor.extract_slice %arg7[%arg5, %15] [1, 16] [1, 1] : tensor<32x128xf32> to tensor<1x16xf32>
%16 = linalg.matmul {lowering_config = #iree_gpu.lowering_config<{promote_operands = [0, 1], reduction = [0, 0, 2], thread = [1, 16, 0], workgroup = [32, 128, 1]}>} ins(%extracted_slice_0, %extracted_slice_1 : tensor<1x2xf32>, tensor<2x16xf32>) outs(%extracted_slice_2 : tensor<1x16xf32>) -> tensor<1x16xf32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %16 into %arg7[%arg5, %15] [1, 16] [1, 1] : tensor<1x16xf32> into tensor<32x128xf32>
}
} {mapping = [#gpu.thread<linear_dim_1>, #gpu.thread<linear_dim_0>]}
scf.yield %14 : tensor<32x128xf32>
}
scf.forall.in_parallel {
tensor.parallel_insert_slice %11 into %arg2[%8, %7] [32, 128] [1, 1] : tensor<32x128xf32> into tensor<128x256xf32>
}
} {mapping = [#iree_codegen.workgroup_mapping<y>, #iree_codegen.workgroup_mapping<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
// -----// IR Dump After OptimizeTensorInsertExtractSlicesPass (iree-codegen-optimize-tensor-insert-extract-slices) //----- //
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32() attributes {translation_info = #iree_codegen.translation_info<pipeline = LLVMGPUTileAndFuse workgroup_size = [32, 8, 1] subgroup_size = 32, {gpu_pipeline_options = #iree_gpu.pipeline_options<prefetch_shared_memory = false, no_reduce_shared_memory_bank_conflicts = true, use_igemm_convolution = false>}>} {
%c2 = arith.constant 2 : index
%c258 = arith.constant 258 : index
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = scf.forall (%arg0, %arg1) in (4, 2) shared_outs(%arg2 = %5) -> (tensor<128x256xf32>) {
%7 = affine.apply affine_map<(d0) -> (d0 * 128)>(%arg1)
%8 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg0)
%extracted_slice = tensor.extract_slice %arg2[%8, %7] [32, 128] [1, 1] : tensor<128x256xf32> to tensor<32x128xf32>
%9 = tensor.empty() : tensor<32x2xf32>
%10 = tensor.empty() : tensor<2x128xf32>
%11 = scf.for %arg3 = %c0 to %c258 step %c2 iter_args(%arg4 = %extracted_slice) -> (tensor<32x128xf32>) {
%12 = scf.forall (%arg5, %arg6) in (32, 1) shared_outs(%arg7 = %9) -> (tensor<32x2xf32>) {
%15 = affine.apply affine_map<(d0)[s0] -> (d0 * 32 + s0)>(%arg0)[%arg5]
%extracted_slice_0 = tensor.extract_slice %3[%15, %arg3] [1, 2] [1, 1] : tensor<128x258xf32> to tensor<1x2xf32>
%extracted_slice_1 = tensor.extract_slice %arg7[%arg5, 0] [1, 2] [1, 1] : tensor<32x2xf32> to tensor<1x2xf32>
%16 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_0 : tensor<1x2xf32>) outs(%extracted_slice_1 : tensor<1x2xf32>) -> tensor<1x2xf32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %16 into %arg7[%arg5, 0] [1, 2] [1, 1] : tensor<1x2xf32> into tensor<32x2xf32>
}
} {mapping = [#gpu.thread<linear_dim_1>, #gpu.thread<linear_dim_0>]}
%13 = scf.forall (%arg5, %arg6) in (2, 128) shared_outs(%arg7 = %10) -> (tensor<2x128xf32>) {
%15 = affine.apply affine_map<()[s0, s1] -> (s0 + s1)>()[%arg5, %arg3]
%16 = affine.apply affine_map<(d0)[s0] -> (d0 * 128 + s0)>(%arg1)[%arg6]
%extracted_slice_0 = tensor.extract_slice %4[%15, %16] [1, 1] [1, 1] : tensor<258x256xf32> to tensor<1x1xf32>
%extracted_slice_1 = tensor.extract_slice %arg7[%arg5, %arg6] [1, 1] [1, 1] : tensor<2x128xf32> to tensor<1x1xf32>
%17 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_0 : tensor<1x1xf32>) outs(%extracted_slice_1 : tensor<1x1xf32>) -> tensor<1x1xf32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %17 into %arg7[%arg5, %arg6] [1, 1] [1, 1] : tensor<1x1xf32> into tensor<2x128xf32>
}
} {mapping = [#gpu.thread<linear_dim_1>, #gpu.thread<linear_dim_0>]}
%14 = scf.forall (%arg5, %arg6) in (32, 8) shared_outs(%arg7 = %arg4) -> (tensor<32x128xf32>) {
%15 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg6)
%extracted_slice_0 = tensor.extract_slice %12[%arg5, 0] [1, 2] [1, 1] : tensor<32x2xf32> to tensor<1x2xf32>
%extracted_slice_1 = tensor.extract_slice %13[0, %15] [2, 16] [1, 1] : tensor<2x128xf32> to tensor<2x16xf32>
%extracted_slice_2 = tensor.extract_slice %arg7[%arg5, %15] [1, 16] [1, 1] : tensor<32x128xf32> to tensor<1x16xf32>
%16 = linalg.matmul {lowering_config = #iree_gpu.lowering_config<{promote_operands = [0, 1], reduction = [0, 0, 2], thread = [1, 16, 0], workgroup = [32, 128, 1]}>} ins(%extracted_slice_0, %extracted_slice_1 : tensor<1x2xf32>, tensor<2x16xf32>) outs(%extracted_slice_2 : tensor<1x16xf32>) -> tensor<1x16xf32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %16 into %arg7[%arg5, %15] [1, 16] [1, 1] : tensor<1x16xf32> into tensor<32x128xf32>
}
} {mapping = [#gpu.thread<linear_dim_1>, #gpu.thread<linear_dim_0>]}
scf.yield %14 : tensor<32x128xf32>
}
scf.forall.in_parallel {
tensor.parallel_insert_slice %11 into %arg2[%8, %7] [32, 128] [1, 1] : tensor<32x128xf32> into tensor<128x256xf32>
}
} {mapping = [#iree_codegen.workgroup_mapping<y>, #iree_codegen.workgroup_mapping<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
// -----// IR Dump After GPUFuseAndHoistParallelLoopsPass (iree-codegen-gpu-fuse-and-hoist-parallel-loops) //----- //
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32() attributes {translation_info = #iree_codegen.translation_info<pipeline = LLVMGPUTileAndFuse workgroup_size = [32, 8, 1] subgroup_size = 32, {gpu_pipeline_options = #iree_gpu.pipeline_options<prefetch_shared_memory = false, no_reduce_shared_memory_bank_conflicts = true, use_igemm_convolution = false>}>} {
%c256 = arith.constant 256 : index
%c32 = arith.constant 32 : index
%c2 = arith.constant 2 : index
%c258 = arith.constant 258 : index
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = scf.forall (%arg0, %arg1) in (4, 2) shared_outs(%arg2 = %5) -> (tensor<128x256xf32>) {
%7 = affine.apply affine_map<(d0) -> (d0 * 128)>(%arg1)
%8 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg0)
%extracted_slice = tensor.extract_slice %arg2[%8, %7] [32, 128] [1, 1] : tensor<128x256xf32> to tensor<32x128xf32>
%9 = bufferization.alloc_tensor() {memory_space = #gpu.address_space<workgroup>} : tensor<32x2xf32>
%10 = bufferization.alloc_tensor() {memory_space = #gpu.address_space<workgroup>} : tensor<2x128xf32>
%11 = scf.forall (%arg3, %arg4) in (32, 8) shared_outs(%arg5 = %extracted_slice) -> (tensor<32x128xf32>) {
%12 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg4)
%extracted_slice_0 = tensor.extract_slice %arg5[%arg3, %12] [1, 16] [1, 1] : tensor<32x128xf32> to tensor<1x16xf32>
%13 = scf.for %arg6 = %c0 to %c258 step %c2 iter_args(%arg7 = %extracted_slice_0) -> (tensor<1x16xf32>) {
%14 = iree_gpu.barrier_region ins(%9 : tensor<32x2xf32>) {
^bb0(%arg8: tensor<32x2xf32>):
%17 = affine.apply affine_map<(d0, d1) -> (d0 * 8 + d1)>(%arg3, %arg4)
%18 = scf.for %arg9 = %17 to %c32 step %c256 iter_args(%arg10 = %arg8) -> (tensor<32x2xf32>) {
%19:2 = affine.delinearize_index %arg9 into (32, 1) : index, index
%20 = affine.apply affine_map<(d0)[s0] -> (d0 * 32 + s0)>(%arg0)[%19#0]
%extracted_slice_3 = tensor.extract_slice %3[%20, %arg6] [1, 2] [1, 1] : tensor<128x258xf32> to tensor<1x2xf32>
%extracted_slice_4 = tensor.extract_slice %arg10[%19#0, 0] [1, 2] [1, 1] : tensor<32x2xf32> to tensor<1x2xf32>
%21 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_3 : tensor<1x2xf32>) outs(%extracted_slice_4 : tensor<1x2xf32>) -> tensor<1x2xf32>
%inserted_slice = tensor.insert_slice %21 into %arg10[%19#0, 0] [1, 2] [1, 1] : tensor<1x2xf32> into tensor<32x2xf32>
scf.yield %inserted_slice : tensor<32x2xf32>
} {unroll_loop}
iree_gpu.yield %18 : tensor<32x2xf32>
} : tensor<32x2xf32>
%15 = iree_gpu.barrier_region ins(%10 : tensor<2x128xf32>) {
^bb0(%arg8: tensor<2x128xf32>):
%17 = scf.for %arg9 = %c0 to %c256 step %c256 iter_args(%arg10 = %arg8) -> (tensor<2x128xf32>) {
%18 = affine.apply affine_map<(d0, d1, d2) -> (d0 + d1 * 8 + d2)>(%arg9, %arg3, %arg4)
%19:2 = affine.delinearize_index %18 into (2, 128) : index, index
%20 = affine.apply affine_map<()[s0, s1] -> (s0 + s1)>()[%19#0, %arg6]
%21 = affine.apply affine_map<(d0)[s0] -> (d0 * 128 + s0)>(%arg1)[%19#1]
%extracted_slice_3 = tensor.extract_slice %4[%20, %21] [1, 1] [1, 1] : tensor<258x256xf32> to tensor<1x1xf32>
%extracted_slice_4 = tensor.extract_slice %arg10[%19#0, %19#1] [1, 1] [1, 1] : tensor<2x128xf32> to tensor<1x1xf32>
%22 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_3 : tensor<1x1xf32>) outs(%extracted_slice_4 : tensor<1x1xf32>) -> tensor<1x1xf32>
%inserted_slice = tensor.insert_slice %22 into %arg10[%19#0, %19#1] [1, 1] [1, 1] : tensor<1x1xf32> into tensor<2x128xf32>
scf.yield %inserted_slice : tensor<2x128xf32>
} {unroll_loop}
iree_gpu.yield %17 : tensor<2x128xf32>
} : tensor<2x128xf32>
%extracted_slice_1 = tensor.extract_slice %14[%arg3, 0] [1, 2] [1, 1] : tensor<32x2xf32> to tensor<1x2xf32>
%extracted_slice_2 = tensor.extract_slice %15[0, %12] [2, 16] [1, 1] : tensor<2x128xf32> to tensor<2x16xf32>
%16 = linalg.matmul {lowering_config = #iree_gpu.lowering_config<{promote_operands = [0, 1], reduction = [0, 0, 2], thread = [1, 16, 0], workgroup = [32, 128, 1]}>} ins(%extracted_slice_1, %extracted_slice_2 : tensor<1x2xf32>, tensor<2x16xf32>) outs(%arg7 : tensor<1x16xf32>) -> tensor<1x16xf32>
scf.yield %16 : tensor<1x16xf32>
}
scf.forall.in_parallel {
tensor.parallel_insert_slice %13 into %arg5[%arg3, %12] [1, 16] [1, 1] : tensor<1x16xf32> into tensor<32x128xf32>
}
} {mapping = [#gpu.thread<linear_dim_1>, #gpu.thread<linear_dim_0>]}
scf.forall.in_parallel {
tensor.parallel_insert_slice %11 into %arg2[%8, %7] [32, 128] [1, 1] : tensor<32x128xf32> into tensor<128x256xf32>
}
} {mapping = [#iree_codegen.workgroup_mapping<y>, #iree_codegen.workgroup_mapping<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
// -----// IR Dump After GPUGreedilyDistributeToThreadsPass (iree-codegen-gpu-greedily-distribute-to-threads) //----- //
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32() attributes {translation_info = #iree_codegen.translation_info<pipeline = LLVMGPUTileAndFuse workgroup_size = [32, 8, 1] subgroup_size = 32, {gpu_pipeline_options = #iree_gpu.pipeline_options<prefetch_shared_memory = false, no_reduce_shared_memory_bank_conflicts = true, use_igemm_convolution = false>}>} {
%c256 = arith.constant 256 : index
%c32 = arith.constant 32 : index
%c2 = arith.constant 2 : index
%c258 = arith.constant 258 : index
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = scf.forall (%arg0, %arg1) in (4, 2) shared_outs(%arg2 = %5) -> (tensor<128x256xf32>) {
%7 = affine.apply affine_map<(d0) -> (d0 * 128)>(%arg1)
%8 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg0)
%extracted_slice = tensor.extract_slice %arg2[%8, %7] [32, 128] [1, 1] : tensor<128x256xf32> to tensor<32x128xf32>
%9 = bufferization.alloc_tensor() {memory_space = #gpu.address_space<workgroup>} : tensor<32x2xf32>
%10 = bufferization.alloc_tensor() {memory_space = #gpu.address_space<workgroup>} : tensor<2x128xf32>
%11 = scf.forall (%arg3, %arg4) in (32, 8) shared_outs(%arg5 = %extracted_slice) -> (tensor<32x128xf32>) {
%12 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg4)
%extracted_slice_0 = tensor.extract_slice %arg5[%arg3, %12] [1, 16] [1, 1] : tensor<32x128xf32> to tensor<1x16xf32>
%13 = scf.for %arg6 = %c0 to %c258 step %c2 iter_args(%arg7 = %extracted_slice_0) -> (tensor<1x16xf32>) {
%14 = iree_gpu.barrier_region ins(%9 : tensor<32x2xf32>) {
^bb0(%arg8: tensor<32x2xf32>):
%17 = affine.apply affine_map<(d0, d1) -> (d0 * 8 + d1)>(%arg3, %arg4)
%18 = scf.for %arg9 = %17 to %c32 step %c256 iter_args(%arg10 = %arg8) -> (tensor<32x2xf32>) {
%19:2 = affine.delinearize_index %arg9 into (32, 1) : index, index
%20 = affine.apply affine_map<(d0)[s0] -> (d0 * 32 + s0)>(%arg0)[%19#0]
%extracted_slice_3 = tensor.extract_slice %3[%20, %arg6] [1, 2] [1, 1] : tensor<128x258xf32> to tensor<1x2xf32>
%extracted_slice_4 = tensor.extract_slice %arg10[%19#0, 0] [1, 2] [1, 1] : tensor<32x2xf32> to tensor<1x2xf32>
%21 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_3 : tensor<1x2xf32>) outs(%extracted_slice_4 : tensor<1x2xf32>) -> tensor<1x2xf32>
%inserted_slice = tensor.insert_slice %21 into %arg10[%19#0, 0] [1, 2] [1, 1] : tensor<1x2xf32> into tensor<32x2xf32>
scf.yield %inserted_slice : tensor<32x2xf32>
} {unroll_loop}
iree_gpu.yield %18 : tensor<32x2xf32>
} : tensor<32x2xf32>
%15 = iree_gpu.barrier_region ins(%10 : tensor<2x128xf32>) {
^bb0(%arg8: tensor<2x128xf32>):
%17 = scf.for %arg9 = %c0 to %c256 step %c256 iter_args(%arg10 = %arg8) -> (tensor<2x128xf32>) {
%18 = affine.apply affine_map<(d0, d1, d2) -> (d0 + d1 * 8 + d2)>(%arg9, %arg3, %arg4)
%19:2 = affine.delinearize_index %18 into (2, 128) : index, index
%20 = affine.apply affine_map<()[s0, s1] -> (s0 + s1)>()[%19#0, %arg6]
%21 = affine.apply affine_map<(d0)[s0] -> (d0 * 128 + s0)>(%arg1)[%19#1]
%extracted_slice_3 = tensor.extract_slice %4[%20, %21] [1, 1] [1, 1] : tensor<258x256xf32> to tensor<1x1xf32>
%extracted_slice_4 = tensor.extract_slice %arg10[%19#0, %19#1] [1, 1] [1, 1] : tensor<2x128xf32> to tensor<1x1xf32>
%22 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_3 : tensor<1x1xf32>) outs(%extracted_slice_4 : tensor<1x1xf32>) -> tensor<1x1xf32>
%inserted_slice = tensor.insert_slice %22 into %arg10[%19#0, %19#1] [1, 1] [1, 1] : tensor<1x1xf32> into tensor<2x128xf32>
scf.yield %inserted_slice : tensor<2x128xf32>
} {unroll_loop}
iree_gpu.yield %17 : tensor<2x128xf32>
} : tensor<2x128xf32>
%extracted_slice_1 = tensor.extract_slice %14[%arg3, 0] [1, 2] [1, 1] : tensor<32x2xf32> to tensor<1x2xf32>
%extracted_slice_2 = tensor.extract_slice %15[0, %12] [2, 16] [1, 1] : tensor<2x128xf32> to tensor<2x16xf32>
%16 = linalg.matmul {lowering_config = #iree_gpu.lowering_config<{promote_operands = [0, 1], reduction = [0, 0, 2], thread = [1, 16, 0], workgroup = [32, 128, 1]}>} ins(%extracted_slice_1, %extracted_slice_2 : tensor<1x2xf32>, tensor<2x16xf32>) outs(%arg7 : tensor<1x16xf32>) -> tensor<1x16xf32>
scf.yield %16 : tensor<1x16xf32>
}
scf.forall.in_parallel {
tensor.parallel_insert_slice %13 into %arg5[%arg3, %12] [1, 16] [1, 1] : tensor<1x16xf32> into tensor<32x128xf32>
}
} {mapping = [#gpu.thread<linear_dim_1>, #gpu.thread<linear_dim_0>]}
scf.forall.in_parallel {
tensor.parallel_insert_slice %11 into %arg2[%8, %7] [32, 128] [1, 1] : tensor<32x128xf32> into tensor<128x256xf32>
}
} {mapping = [#iree_codegen.workgroup_mapping<y>, #iree_codegen.workgroup_mapping<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
// -----// IR Dump After TileLargeTensorsPass (iree-codegen-tile-large-tensors) //----- //
// Compiler IR dump after iree-codegen-tile-large-tensors. The IR here is
// identical to the preceding dump — the pass made no structural change to
// this 128x256x258 f32 matmul dispatch (LLVMGPUTileAndFuse, 32x8x1
// workgroups, operands staged through workgroup memory).
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32() attributes {translation_info = #iree_codegen.translation_info<pipeline = LLVMGPUTileAndFuse workgroup_size = [32, 8, 1] subgroup_size = 32, {gpu_pipeline_options = #iree_gpu.pipeline_options<prefetch_shared_memory = false, no_reduce_shared_memory_bank_conflicts = true, use_igemm_convolution = false>}>} {
%c256 = arith.constant 256 : index
%c32 = arith.constant 32 : index
%c2 = arith.constant 2 : index
%c258 = arith.constant 258 : index
%c0 = arith.constant 0 : index
// Bindings: 0 = LHS (128x258, read-only), 1 = RHS (258x256, read-only),
// 2 = result (128x256, write-only).
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
// Workgroup tiling: 4x2 workgroups, one 32x128 result tile each.
%6 = scf.forall (%arg0, %arg1) in (4, 2) shared_outs(%arg2 = %5) -> (tensor<128x256xf32>) {
%7 = affine.apply affine_map<(d0) -> (d0 * 128)>(%arg1)
%8 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg0)
%extracted_slice = tensor.extract_slice %arg2[%8, %7] [32, 128] [1, 1] : tensor<128x256xf32> to tensor<32x128xf32>
// Workgroup-memory staging tiles for the promoted LHS/RHS slices.
%9 = bufferization.alloc_tensor() {memory_space = #gpu.address_space<workgroup>} : tensor<32x2xf32>
%10 = bufferization.alloc_tensor() {memory_space = #gpu.address_space<workgroup>} : tensor<2x128xf32>
// Thread tiling: 32x8 threads, each owning a 1x16 accumulator tile.
%11 = scf.forall (%arg3, %arg4) in (32, 8) shared_outs(%arg5 = %extracted_slice) -> (tensor<32x128xf32>) {
%12 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg4)
%extracted_slice_0 = tensor.extract_slice %arg5[%arg3, %12] [1, 16] [1, 1] : tensor<32x128xf32> to tensor<1x16xf32>
// Reduction loop over K = 258 in steps of 2.
%13 = scf.for %arg6 = %c0 to %c258 step %c2 iter_args(%arg7 = %extracted_slice_0) -> (tensor<1x16xf32>) {
// Stage the 32x2 LHS slice into workgroup memory (barrier_region
// presumably synchronizes the workgroup around the fill — see the
// iree_gpu dialect op semantics).
%14 = iree_gpu.barrier_region ins(%9 : tensor<32x2xf32>) {
^bb0(%arg8: tensor<32x2xf32>):
%17 = affine.apply affine_map<(d0, d1) -> (d0 * 8 + d1)>(%arg3, %arg4)
// Loop bound %c32 with step %c256: only thread ids < 32 execute,
// each copying one 1x2 LHS row.
%18 = scf.for %arg9 = %17 to %c32 step %c256 iter_args(%arg10 = %arg8) -> (tensor<32x2xf32>) {
%19:2 = affine.delinearize_index %arg9 into (32, 1) : index, index
%20 = affine.apply affine_map<(d0)[s0] -> (d0 * 32 + s0)>(%arg0)[%19#0]
%extracted_slice_3 = tensor.extract_slice %3[%20, %arg6] [1, 2] [1, 1] : tensor<128x258xf32> to tensor<1x2xf32>
%extracted_slice_4 = tensor.extract_slice %arg10[%19#0, 0] [1, 2] [1, 1] : tensor<32x2xf32> to tensor<1x2xf32>
%21 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_3 : tensor<1x2xf32>) outs(%extracted_slice_4 : tensor<1x2xf32>) -> tensor<1x2xf32>
%inserted_slice = tensor.insert_slice %21 into %arg10[%19#0, 0] [1, 2] [1, 1] : tensor<1x2xf32> into tensor<32x2xf32>
scf.yield %inserted_slice : tensor<32x2xf32>
} {unroll_loop}
iree_gpu.yield %18 : tensor<32x2xf32>
} : tensor<32x2xf32>
// Stage the 2x128 RHS slice: single-trip loop (0 to 256 step 256);
// each of the 256 threads copies one element via (2, 128) delinearize.
%15 = iree_gpu.barrier_region ins(%10 : tensor<2x128xf32>) {
^bb0(%arg8: tensor<2x128xf32>):
%17 = scf.for %arg9 = %c0 to %c256 step %c256 iter_args(%arg10 = %arg8) -> (tensor<2x128xf32>) {
%18 = affine.apply affine_map<(d0, d1, d2) -> (d0 + d1 * 8 + d2)>(%arg9, %arg3, %arg4)
%19:2 = affine.delinearize_index %18 into (2, 128) : index, index
%20 = affine.apply affine_map<()[s0, s1] -> (s0 + s1)>()[%19#0, %arg6]
%21 = affine.apply affine_map<(d0)[s0] -> (d0 * 128 + s0)>(%arg1)[%19#1]
%extracted_slice_3 = tensor.extract_slice %4[%20, %21] [1, 1] [1, 1] : tensor<258x256xf32> to tensor<1x1xf32>
%extracted_slice_4 = tensor.extract_slice %arg10[%19#0, %19#1] [1, 1] [1, 1] : tensor<2x128xf32> to tensor<1x1xf32>
%22 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_3 : tensor<1x1xf32>) outs(%extracted_slice_4 : tensor<1x1xf32>) -> tensor<1x1xf32>
%inserted_slice = tensor.insert_slice %22 into %arg10[%19#0, %19#1] [1, 1] [1, 1] : tensor<1x1xf32> into tensor<2x128xf32>
scf.yield %inserted_slice : tensor<2x128xf32>
} {unroll_loop}
iree_gpu.yield %17 : tensor<2x128xf32>
} : tensor<2x128xf32>
// Per-thread partial product: (1x2) * (2x16) into the 1x16 accumulator.
%extracted_slice_1 = tensor.extract_slice %14[%arg3, 0] [1, 2] [1, 1] : tensor<32x2xf32> to tensor<1x2xf32>
%extracted_slice_2 = tensor.extract_slice %15[0, %12] [2, 16] [1, 1] : tensor<2x128xf32> to tensor<2x16xf32>
%16 = linalg.matmul {lowering_config = #iree_gpu.lowering_config<{promote_operands = [0, 1], reduction = [0, 0, 2], thread = [1, 16, 0], workgroup = [32, 128, 1]}>} ins(%extracted_slice_1, %extracted_slice_2 : tensor<1x2xf32>, tensor<2x16xf32>) outs(%arg7 : tensor<1x16xf32>) -> tensor<1x16xf32>
scf.yield %16 : tensor<1x16xf32>
}
scf.forall.in_parallel {
tensor.parallel_insert_slice %13 into %arg5[%arg3, %12] [1, 16] [1, 1] : tensor<1x16xf32> into tensor<32x128xf32>
}
} {mapping = [#gpu.thread<linear_dim_1>, #gpu.thread<linear_dim_0>]}
scf.forall.in_parallel {
tensor.parallel_insert_slice %11 into %arg2[%8, %7] [32, 128] [1, 1] : tensor<32x128xf32> into tensor<128x256xf32>
}
} {mapping = [#iree_codegen.workgroup_mapping<y>, #iree_codegen.workgroup_mapping<x>]}
// Store the finished 128x256 result to the write-only output binding.
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
// Compiler IR dump after canonicalization. Relative to the earlier dumps the
// staging regions are simplified: the LHS copy loop indexes rows with the
// induction variable directly (the (32, 1) delinearize folded away), and the
// RHS staging region's single-trip scf.for was removed entirely.
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32() attributes {translation_info = #iree_codegen.translation_info<pipeline = LLVMGPUTileAndFuse workgroup_size = [32, 8, 1] subgroup_size = 32, {gpu_pipeline_options = #iree_gpu.pipeline_options<prefetch_shared_memory = false, no_reduce_shared_memory_bank_conflicts = true, use_igemm_convolution = false>}>} {
%c256 = arith.constant 256 : index
%c32 = arith.constant 32 : index
%c2 = arith.constant 2 : index
%c258 = arith.constant 258 : index
%c0 = arith.constant 0 : index
// Bindings: 0 = LHS (128x258, read-only), 1 = RHS (258x256, read-only),
// 2 = result (128x256, write-only).
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
// Workgroup tiling: 4x2 workgroups, one 32x128 result tile each.
%6 = scf.forall (%arg0, %arg1) in (4, 2) shared_outs(%arg2 = %5) -> (tensor<128x256xf32>) {
%7 = affine.apply affine_map<(d0) -> (d0 * 128)>(%arg1)
%8 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg0)
%extracted_slice = tensor.extract_slice %arg2[%8, %7] [32, 128] [1, 1] : tensor<128x256xf32> to tensor<32x128xf32>
// Workgroup-memory staging tiles for the promoted LHS/RHS slices.
%9 = bufferization.alloc_tensor() {memory_space = #gpu.address_space<workgroup>} : tensor<32x2xf32>
%10 = bufferization.alloc_tensor() {memory_space = #gpu.address_space<workgroup>} : tensor<2x128xf32>
// Thread tiling: 32x8 threads, each owning a 1x16 accumulator tile.
%11 = scf.forall (%arg3, %arg4) in (32, 8) shared_outs(%arg5 = %extracted_slice) -> (tensor<32x128xf32>) {
%12 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg4)
%extracted_slice_0 = tensor.extract_slice %arg5[%arg3, %12] [1, 16] [1, 1] : tensor<32x128xf32> to tensor<1x16xf32>
// Reduction loop over K = 258 in steps of 2.
%13 = scf.for %arg6 = %c0 to %c258 step %c2 iter_args(%arg7 = %extracted_slice_0) -> (tensor<1x16xf32>) {
// Stage the 32x2 LHS slice into workgroup memory (barrier_region
// presumably synchronizes the workgroup around the fill — see the
// iree_gpu dialect op semantics).
%14 = iree_gpu.barrier_region ins(%9 : tensor<32x2xf32>) {
^bb0(%arg8: tensor<32x2xf32>):
%17 = affine.apply affine_map<(d0, d1) -> (d0 * 8 + d1)>(%arg3, %arg4)
// Loop bound %c32 with step %c256: only thread ids < 32 execute,
// each copying one 1x2 LHS row indexed by the id itself.
%18 = scf.for %arg9 = %17 to %c32 step %c256 iter_args(%arg10 = %arg8) -> (tensor<32x2xf32>) {
%19 = affine.apply affine_map<(d0)[s0] -> (d0 * 32 + s0)>(%arg0)[%arg9]
%extracted_slice_3 = tensor.extract_slice %3[%19, %arg6] [1, 2] [1, 1] : tensor<128x258xf32> to tensor<1x2xf32>
%extracted_slice_4 = tensor.extract_slice %arg10[%arg9, 0] [1, 2] [1, 1] : tensor<32x2xf32> to tensor<1x2xf32>
%20 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_3 : tensor<1x2xf32>) outs(%extracted_slice_4 : tensor<1x2xf32>) -> tensor<1x2xf32>
%inserted_slice = tensor.insert_slice %20 into %arg10[%arg9, 0] [1, 2] [1, 1] : tensor<1x2xf32> into tensor<32x2xf32>
scf.yield %inserted_slice : tensor<32x2xf32>
} {unroll_loop}
iree_gpu.yield %18 : tensor<32x2xf32>
} : tensor<32x2xf32>
// Stage the 2x128 RHS slice (loop-free after canonicalization): each
// thread's linearized id is delinearized into (2, 128) and the thread
// copies exactly one element.
%15 = iree_gpu.barrier_region ins(%10 : tensor<2x128xf32>) {
^bb0(%arg8: tensor<2x128xf32>):
%17 = affine.apply affine_map<(d0, d1) -> (d0 * 8 + d1)>(%arg3, %arg4)
%18:2 = affine.delinearize_index %17 into (2, 128) : index, index
%19 = affine.apply affine_map<()[s0, s1] -> (s0 + s1)>()[%18#0, %arg6]
%20 = affine.apply affine_map<(d0)[s0] -> (d0 * 128 + s0)>(%arg1)[%18#1]
%extracted_slice_3 = tensor.extract_slice %4[%19, %20] [1, 1] [1, 1] : tensor<258x256xf32> to tensor<1x1xf32>
%extracted_slice_4 = tensor.extract_slice %arg8[%18#0, %18#1] [1, 1] [1, 1] : tensor<2x128xf32> to tensor<1x1xf32>
%21 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_3 : tensor<1x1xf32>) outs(%extracted_slice_4 : tensor<1x1xf32>) -> tensor<1x1xf32>
%inserted_slice = tensor.insert_slice %21 into %arg8[%18#0, %18#1] [1, 1] [1, 1] : tensor<1x1xf32> into tensor<2x128xf32>
iree_gpu.yield %inserted_slice : tensor<2x128xf32>
} : tensor<2x128xf32>
// Per-thread partial product: (1x2) * (2x16) into the 1x16 accumulator.
%extracted_slice_1 = tensor.extract_slice %14[%arg3, 0] [1, 2] [1, 1] : tensor<32x2xf32> to tensor<1x2xf32>
%extracted_slice_2 = tensor.extract_slice %15[0, %12] [2, 16] [1, 1] : tensor<2x128xf32> to tensor<2x16xf32>
%16 = linalg.matmul {lowering_config = #iree_gpu.lowering_config<{promote_operands = [0, 1], reduction = [0, 0, 2], thread = [1, 16, 0], workgroup = [32, 128, 1]}>} ins(%extracted_slice_1, %extracted_slice_2 : tensor<1x2xf32>, tensor<2x16xf32>) outs(%arg7 : tensor<1x16xf32>) -> tensor<1x16xf32>
scf.yield %16 : tensor<1x16xf32>
}
scf.forall.in_parallel {
tensor.parallel_insert_slice %13 into %arg5[%arg3, %12] [1, 16] [1, 1] : tensor<1x16xf32> into tensor<32x128xf32>
}
} {mapping = [#gpu.thread<linear_dim_1>, #gpu.thread<linear_dim_0>]}
scf.forall.in_parallel {
tensor.parallel_insert_slice %11 into %arg2[%8, %7] [32, 128] [1, 1] : tensor<32x128xf32> into tensor<128x256xf32>
}
} {mapping = [#iree_codegen.workgroup_mapping<y>, #iree_codegen.workgroup_mapping<x>]}
// Store the finished 128x256 result to the write-only output binding.
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
// -----// IR Dump After CSE (cse) //----- //
// Compiler IR dump after CSE. The IR is identical in form to the
// canonicalized dump of this 128x256x258 f32 matmul dispatch
// (LLVMGPUTileAndFuse, 32x8x1 workgroups, operands staged through
// workgroup memory).
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32() attributes {translation_info = #iree_codegen.translation_info<pipeline = LLVMGPUTileAndFuse workgroup_size = [32, 8, 1] subgroup_size = 32, {gpu_pipeline_options = #iree_gpu.pipeline_options<prefetch_shared_memory = false, no_reduce_shared_memory_bank_conflicts = true, use_igemm_convolution = false>}>} {
%c256 = arith.constant 256 : index
%c32 = arith.constant 32 : index
%c2 = arith.constant 2 : index
%c258 = arith.constant 258 : index
%c0 = arith.constant 0 : index
// Bindings: 0 = LHS (128x258, read-only), 1 = RHS (258x256, read-only),
// 2 = result (128x256, write-only).
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
// Workgroup tiling: 4x2 workgroups, one 32x128 result tile each.
%6 = scf.forall (%arg0, %arg1) in (4, 2) shared_outs(%arg2 = %5) -> (tensor<128x256xf32>) {
%7 = affine.apply affine_map<(d0) -> (d0 * 128)>(%arg1)
%8 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg0)
%extracted_slice = tensor.extract_slice %arg2[%8, %7] [32, 128] [1, 1] : tensor<128x256xf32> to tensor<32x128xf32>
// Workgroup-memory staging tiles for the promoted LHS/RHS slices.
%9 = bufferization.alloc_tensor() {memory_space = #gpu.address_space<workgroup>} : tensor<32x2xf32>
%10 = bufferization.alloc_tensor() {memory_space = #gpu.address_space<workgroup>} : tensor<2x128xf32>
// Thread tiling: 32x8 threads, each owning a 1x16 accumulator tile.
%11 = scf.forall (%arg3, %arg4) in (32, 8) shared_outs(%arg5 = %extracted_slice) -> (tensor<32x128xf32>) {
%12 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg4)
%extracted_slice_0 = tensor.extract_slice %arg5[%arg3, %12] [1, 16] [1, 1] : tensor<32x128xf32> to tensor<1x16xf32>
// Reduction loop over K = 258 in steps of 2.
%13 = scf.for %arg6 = %c0 to %c258 step %c2 iter_args(%arg7 = %extracted_slice_0) -> (tensor<1x16xf32>) {
// Stage the 32x2 LHS slice into workgroup memory (barrier_region
// presumably synchronizes the workgroup around the fill — see the
// iree_gpu dialect op semantics).
%14 = iree_gpu.barrier_region ins(%9 : tensor<32x2xf32>) {
^bb0(%arg8: tensor<32x2xf32>):
%17 = affine.apply affine_map<(d0, d1) -> (d0 * 8 + d1)>(%arg3, %arg4)
// Loop bound %c32 with step %c256: only thread ids < 32 execute,
// each copying one 1x2 LHS row indexed by the id itself.
%18 = scf.for %arg9 = %17 to %c32 step %c256 iter_args(%arg10 = %arg8) -> (tensor<32x2xf32>) {
%19 = affine.apply affine_map<(d0)[s0] -> (d0 * 32 + s0)>(%arg0)[%arg9]
%extracted_slice_3 = tensor.extract_slice %3[%19, %arg6] [1, 2] [1, 1] : tensor<128x258xf32> to tensor<1x2xf32>
%extracted_slice_4 = tensor.extract_slice %arg10[%arg9, 0] [1, 2] [1, 1] : tensor<32x2xf32> to tensor<1x2xf32>
%20 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_3 : tensor<1x2xf32>) outs(%extracted_slice_4 : tensor<1x2xf32>) -> tensor<1x2xf32>
%inserted_slice = tensor.insert_slice %20 into %arg10[%arg9, 0] [1, 2] [1, 1] : tensor<1x2xf32> into tensor<32x2xf32>
scf.yield %inserted_slice : tensor<32x2xf32>
} {unroll_loop}
iree_gpu.yield %18 : tensor<32x2xf32>
} : tensor<32x2xf32>
// Stage the 2x128 RHS slice: each thread's linearized id is
// delinearized into (2, 128) and the thread copies one element.
%15 = iree_gpu.barrier_region ins(%10 : tensor<2x128xf32>) {
^bb0(%arg8: tensor<2x128xf32>):
%17 = affine.apply affine_map<(d0, d1) -> (d0 * 8 + d1)>(%arg3, %arg4)
%18:2 = affine.delinearize_index %17 into (2, 128) : index, index
%19 = affine.apply affine_map<()[s0, s1] -> (s0 + s1)>()[%18#0, %arg6]
%20 = affine.apply affine_map<(d0)[s0] -> (d0 * 128 + s0)>(%arg1)[%18#1]
%extracted_slice_3 = tensor.extract_slice %4[%19, %20] [1, 1] [1, 1] : tensor<258x256xf32> to tensor<1x1xf32>
%extracted_slice_4 = tensor.extract_slice %arg8[%18#0, %18#1] [1, 1] [1, 1] : tensor<2x128xf32> to tensor<1x1xf32>
%21 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_3 : tensor<1x1xf32>) outs(%extracted_slice_4 : tensor<1x1xf32>) -> tensor<1x1xf32>
%inserted_slice = tensor.insert_slice %21 into %arg8[%18#0, %18#1] [1, 1] [1, 1] : tensor<1x1xf32> into tensor<2x128xf32>
iree_gpu.yield %inserted_slice : tensor<2x128xf32>
} : tensor<2x128xf32>
// Per-thread partial product: (1x2) * (2x16) into the 1x16 accumulator.
%extracted_slice_1 = tensor.extract_slice %14[%arg3, 0] [1, 2] [1, 1] : tensor<32x2xf32> to tensor<1x2xf32>
%extracted_slice_2 = tensor.extract_slice %15[0, %12] [2, 16] [1, 1] : tensor<2x128xf32> to tensor<2x16xf32>
%16 = linalg.matmul {lowering_config = #iree_gpu.lowering_config<{promote_operands = [0, 1], reduction = [0, 0, 2], thread = [1, 16, 0], workgroup = [32, 128, 1]}>} ins(%extracted_slice_1, %extracted_slice_2 : tensor<1x2xf32>, tensor<2x16xf32>) outs(%arg7 : tensor<1x16xf32>) -> tensor<1x16xf32>
scf.yield %16 : tensor<1x16xf32>
}
scf.forall.in_parallel {
tensor.parallel_insert_slice %13 into %arg5[%arg3, %12] [1, 16] [1, 1] : tensor<1x16xf32> into tensor<32x128xf32>
}
} {mapping = [#gpu.thread<linear_dim_1>, #gpu.thread<linear_dim_0>]}
scf.forall.in_parallel {
tensor.parallel_insert_slice %11 into %arg2[%8, %7] [32, 128] [1, 1] : tensor<32x128xf32> into tensor<128x256xf32>
}
} {mapping = [#iree_codegen.workgroup_mapping<y>, #iree_codegen.workgroup_mapping<x>]}
// Store the finished 128x256 result to the write-only output binding.
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
// -----// IR Dump After IREELoopInvariantCodeMotionPass (iree-loop-invariant-code-motion) //----- //
// Compiler IR dump after iree-loop-invariant-code-motion. The IR has the
// same form as the post-CSE dump of this 128x256x258 f32 matmul dispatch
// (LLVMGPUTileAndFuse, 32x8x1 workgroups, operands staged through
// workgroup memory).
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32() attributes {translation_info = #iree_codegen.translation_info<pipeline = LLVMGPUTileAndFuse workgroup_size = [32, 8, 1] subgroup_size = 32, {gpu_pipeline_options = #iree_gpu.pipeline_options<prefetch_shared_memory = false, no_reduce_shared_memory_bank_conflicts = true, use_igemm_convolution = false>}>} {
%c256 = arith.constant 256 : index
%c32 = arith.constant 32 : index
%c2 = arith.constant 2 : index
%c258 = arith.constant 258 : index
%c0 = arith.constant 0 : index
// Bindings: 0 = LHS (128x258, read-only), 1 = RHS (258x256, read-only),
// 2 = result (128x256, write-only).
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
// Workgroup tiling: 4x2 workgroups, one 32x128 result tile each.
%6 = scf.forall (%arg0, %arg1) in (4, 2) shared_outs(%arg2 = %5) -> (tensor<128x256xf32>) {
%7 = affine.apply affine_map<(d0) -> (d0 * 128)>(%arg1)
%8 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg0)
%extracted_slice = tensor.extract_slice %arg2[%8, %7] [32, 128] [1, 1] : tensor<128x256xf32> to tensor<32x128xf32>
// Workgroup-memory staging tiles for the promoted LHS/RHS slices.
%9 = bufferization.alloc_tensor() {memory_space = #gpu.address_space<workgroup>} : tensor<32x2xf32>
%10 = bufferization.alloc_tensor() {memory_space = #gpu.address_space<workgroup>} : tensor<2x128xf32>
// Thread tiling: 32x8 threads, each owning a 1x16 accumulator tile.
%11 = scf.forall (%arg3, %arg4) in (32, 8) shared_outs(%arg5 = %extracted_slice) -> (tensor<32x128xf32>) {
%12 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg4)
%extracted_slice_0 = tensor.extract_slice %arg5[%arg3, %12] [1, 16] [1, 1] : tensor<32x128xf32> to tensor<1x16xf32>
// Reduction loop over K = 258 in steps of 2.
%13 = scf.for %arg6 = %c0 to %c258 step %c2 iter_args(%arg7 = %extracted_slice_0) -> (tensor<1x16xf32>) {
// Stage the 32x2 LHS slice into workgroup memory (barrier_region
// presumably synchronizes the workgroup around the fill — see the
// iree_gpu dialect op semantics).
%14 = iree_gpu.barrier_region ins(%9 : tensor<32x2xf32>) {
^bb0(%arg8: tensor<32x2xf32>):
%17 = affine.apply affine_map<(d0, d1) -> (d0 * 8 + d1)>(%arg3, %arg4)
// Loop bound %c32 with step %c256: only thread ids < 32 execute,
// each copying one 1x2 LHS row indexed by the id itself.
%18 = scf.for %arg9 = %17 to %c32 step %c256 iter_args(%arg10 = %arg8) -> (tensor<32x2xf32>) {
%19 = affine.apply affine_map<(d0)[s0] -> (d0 * 32 + s0)>(%arg0)[%arg9]
%extracted_slice_3 = tensor.extract_slice %3[%19, %arg6] [1, 2] [1, 1] : tensor<128x258xf32> to tensor<1x2xf32>
%extracted_slice_4 = tensor.extract_slice %arg10[%arg9, 0] [1, 2] [1, 1] : tensor<32x2xf32> to tensor<1x2xf32>
%20 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_3 : tensor<1x2xf32>) outs(%extracted_slice_4 : tensor<1x2xf32>) -> tensor<1x2xf32>
%inserted_slice = tensor.insert_slice %20 into %arg10[%arg9, 0] [1, 2] [1, 1] : tensor<1x2xf32> into tensor<32x2xf32>
scf.yield %inserted_slice : tensor<32x2xf32>
} {unroll_loop}
iree_gpu.yield %18 : tensor<32x2xf32>
} : tensor<32x2xf32>
// Stage the 2x128 RHS slice: each thread's linearized id is
// delinearized into (2, 128) and the thread copies one element.
%15 = iree_gpu.barrier_region ins(%10 : tensor<2x128xf32>) {
^bb0(%arg8: tensor<2x128xf32>):
%17 = affine.apply affine_map<(d0, d1) -> (d0 * 8 + d1)>(%arg3, %arg4)
%18:2 = affine.delinearize_index %17 into (2, 128) : index, index
%19 = affine.apply affine_map<()[s0, s1] -> (s0 + s1)>()[%18#0, %arg6]
%20 = affine.apply affine_map<(d0)[s0] -> (d0 * 128 + s0)>(%arg1)[%18#1]
%extracted_slice_3 = tensor.extract_slice %4[%19, %20] [1, 1] [1, 1] : tensor<258x256xf32> to tensor<1x1xf32>
%extracted_slice_4 = tensor.extract_slice %arg8[%18#0, %18#1] [1, 1] [1, 1] : tensor<2x128xf32> to tensor<1x1xf32>
%21 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_3 : tensor<1x1xf32>) outs(%extracted_slice_4 : tensor<1x1xf32>) -> tensor<1x1xf32>
%inserted_slice = tensor.insert_slice %21 into %arg8[%18#0, %18#1] [1, 1] [1, 1] : tensor<1x1xf32> into tensor<2x128xf32>
iree_gpu.yield %inserted_slice : tensor<2x128xf32>
} : tensor<2x128xf32>
// Per-thread partial product: (1x2) * (2x16) into the 1x16 accumulator.
%extracted_slice_1 = tensor.extract_slice %14[%arg3, 0] [1, 2] [1, 1] : tensor<32x2xf32> to tensor<1x2xf32>
%extracted_slice_2 = tensor.extract_slice %15[0, %12] [2, 16] [1, 1] : tensor<2x128xf32> to tensor<2x16xf32>
%16 = linalg.matmul {lowering_config = #iree_gpu.lowering_config<{promote_operands = [0, 1], reduction = [0, 0, 2], thread = [1, 16, 0], workgroup = [32, 128, 1]}>} ins(%extracted_slice_1, %extracted_slice_2 : tensor<1x2xf32>, tensor<2x16xf32>) outs(%arg7 : tensor<1x16xf32>) -> tensor<1x16xf32>
scf.yield %16 : tensor<1x16xf32>
}
scf.forall.in_parallel {
tensor.parallel_insert_slice %13 into %arg5[%arg3, %12] [1, 16] [1, 1] : tensor<1x16xf32> into tensor<32x128xf32>
}
} {mapping = [#gpu.thread<linear_dim_1>, #gpu.thread<linear_dim_0>]}
scf.forall.in_parallel {
tensor.parallel_insert_slice %11 into %arg2[%8, %7] [32, 128] [1, 1] : tensor<32x128xf32> into tensor<128x256xf32>
}
} {mapping = [#iree_codegen.workgroup_mapping<y>, #iree_codegen.workgroup_mapping<x>]}
// Store the finished 128x256 result to the write-only output binding.
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
// -----// IR Dump After CombineBarrierRegionsPass (iree-gpu-combine-barrier-regions) //----- //
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32() attributes {translation_info = #iree_codegen.translation_info<pipeline = LLVMGPUTileAndFuse workgroup_size = [32, 8, 1] subgroup_size = 32, {gpu_pipeline_options = #iree_gpu.pipeline_options<prefetch_shared_memory = false, no_reduce_shared_memory_bank_conflicts = true, use_igemm_convolution = false>}>} {
%c256 = arith.constant 256 : index
%c32 = arith.constant 32 : index
%c2 = arith.constant 2 : index
%c258 = arith.constant 258 : index
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = scf.forall (%arg0, %arg1) in (4, 2) shared_outs(%arg2 = %5) -> (tensor<128x256xf32>) {
%7 = affine.apply affine_map<(d0) -> (d0 * 128)>(%arg1)
%8 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg0)
%extracted_slice = tensor.extract_slice %arg2[%8, %7] [32, 128] [1, 1] : tensor<128x256xf32> to tensor<32x128xf32>
%9 = bufferization.alloc_tensor() {memory_space = #gpu.address_space<workgroup>} : tensor<32x2xf32>
%10 = bufferization.alloc_tensor() {memory_space = #gpu.address_space<workgroup>} : tensor<2x128xf32>
%11 = scf.forall (%arg3, %arg4) in (32, 8) shared_outs(%arg5 = %extracted_slice) -> (tensor<32x128xf32>) {
%12 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg4)
%extracted_slice_0 = tensor.extract_slice %arg5[%arg3, %12] [1, 16] [1, 1] : tensor<32x128xf32> to tensor<1x16xf32>
%13 = scf.for %arg6 = %c0 to %c258 step %c2 iter_args(%arg7 = %extracted_slice_0) -> (tensor<1x16xf32>) {
%14:2 = iree_gpu.barrier_region ins(%9, %10 : tensor<32x2xf32>, tensor<2x128xf32>) {
^bb0(%arg8: tensor<32x2xf32>, %arg9: tensor<2x128xf32>):
%16 = affine.apply affine_map<(d0, d1) -> (d0 * 8 + d1)>(%arg3, %arg4)
%17 = scf.for %arg10 = %16 to %c32 step %c256 iter_args(%arg11 = %arg8) -> (tensor<32x2xf32>) {
%23 = affine.apply affine_map<(d0)[s0] -> (d0 * 32 + s0)>(%arg0)[%arg10]
%extracted_slice_5 = tensor.extract_slice %3[%23, %arg6] [1, 2] [1, 1] : tensor<128x258xf32> to tensor<1x2xf32>
%extracted_slice_6 = tensor.extract_slice %arg11[%arg10, 0] [1, 2] [1, 1] : tensor<32x2xf32> to tensor<1x2xf32>
%24 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_5 : tensor<1x2xf32>) outs(%extracted_slice_6 : tensor<1x2xf32>) -> tensor<1x2xf32>
%inserted_slice_7 = tensor.insert_slice %24 into %arg11[%arg10, 0] [1, 2] [1, 1] : tensor<1x2xf32> into tensor<32x2xf32>
scf.yield %inserted_slice_7 : tensor<32x2xf32>
} {unroll_loop}
%18 = affine.apply affine_map<(d0, d1) -> (d0 * 8 + d1)>(%arg3, %arg4)
%19:2 = affine.delinearize_index %18 into (2, 128) : index, index
%20 = affine.apply affine_map<()[s0, s1] -> (s0 + s1)>()[%19#0, %arg6]
%21 = affine.apply affine_map<(d0)[s0] -> (d0 * 128 + s0)>(%arg1)[%19#1]
%extracted_slice_3 = tensor.extract_slice %4[%20, %21] [1, 1] [1, 1] : tensor<258x256xf32> to tensor<1x1xf32>
%extracted_slice_4 = tensor.extract_slice %arg9[%19#0, %19#1] [1, 1] [1, 1] : tensor<2x128xf32> to tensor<1x1xf32>
%22 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_3 : tensor<1x1xf32>) outs(%extracted_slice_4 : tensor<1x1xf32>) -> tensor<1x1xf32>
%inserted_slice = tensor.insert_slice %22 into %arg9[%19#0, %19#1] [1, 1] [1, 1] : tensor<1x1xf32> into tensor<2x128xf32>
iree_gpu.yield %17, %inserted_slice : tensor<32x2xf32>, tensor<2x128xf32>
} : tensor<32x2xf32>, tensor<2x128xf32>
%extracted_slice_1 = tensor.extract_slice %14#0[%arg3, 0] [1, 2] [1, 1] : tensor<32x2xf32> to tensor<1x2xf32>
%extracted_slice_2 = tensor.extract_slice %14#1[0, %12] [2, 16] [1, 1] : tensor<2x128xf32> to tensor<2x16xf32>
%15 = linalg.matmul {lowering_config = #iree_gpu.lowering_config<{promote_operands = [0, 1], reduction = [0, 0, 2], thread = [1, 16, 0], workgroup = [32, 128, 1]}>} ins(%extracted_slice_1, %extracted_slice_2 : tensor<1x2xf32>, tensor<2x16xf32>) outs(%arg7 : tensor<1x16xf32>) -> tensor<1x16xf32>
scf.yield %15 : tensor<1x16xf32>
}
scf.forall.in_parallel {
tensor.parallel_insert_slice %13 into %arg5[%arg3, %12] [1, 16] [1, 1] : tensor<1x16xf32> into tensor<32x128xf32>
}
} {mapping = [#gpu.thread<linear_dim_1>, #gpu.thread<linear_dim_0>]}
scf.forall.in_parallel {
tensor.parallel_insert_slice %11 into %arg2[%8, %7] [32, 128] [1, 1] : tensor<32x128xf32> into tensor<128x256xf32>
}
} {mapping = [#iree_codegen.workgroup_mapping<y>, #iree_codegen.workgroup_mapping<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
// -----// IR Dump After VectorizeIREEGPUOpsPass (iree-gpu-vectorize-ops) //----- //
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32() attributes {translation_info = #iree_codegen.translation_info<pipeline = LLVMGPUTileAndFuse workgroup_size = [32, 8, 1] subgroup_size = 32, {gpu_pipeline_options = #iree_gpu.pipeline_options<prefetch_shared_memory = false, no_reduce_shared_memory_bank_conflicts = true, use_igemm_convolution = false>}>} {
%c256 = arith.constant 256 : index
%c32 = arith.constant 32 : index
%c2 = arith.constant 2 : index
%c258 = arith.constant 258 : index
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = scf.forall (%arg0, %arg1) in (4, 2) shared_outs(%arg2 = %5) -> (tensor<128x256xf32>) {
%7 = affine.apply affine_map<(d0) -> (d0 * 128)>(%arg1)
%8 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg0)
%extracted_slice = tensor.extract_slice %arg2[%8, %7] [32, 128] [1, 1] : tensor<128x256xf32> to tensor<32x128xf32>
%9 = bufferization.alloc_tensor() {memory_space = #gpu.address_space<workgroup>} : tensor<32x2xf32>
%10 = bufferization.alloc_tensor() {memory_space = #gpu.address_space<workgroup>} : tensor<2x128xf32>
%11 = scf.forall (%arg3, %arg4) in (32, 8) shared_outs(%arg5 = %extracted_slice) -> (tensor<32x128xf32>) {
%12 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg4)
%extracted_slice_0 = tensor.extract_slice %arg5[%arg3, %12] [1, 16] [1, 1] : tensor<32x128xf32> to tensor<1x16xf32>
%13 = scf.for %arg6 = %c0 to %c258 step %c2 iter_args(%arg7 = %extracted_slice_0) -> (tensor<1x16xf32>) {
%14:2 = iree_gpu.barrier_region ins(%9, %10 : tensor<32x2xf32>, tensor<2x128xf32>) {
^bb0(%arg8: tensor<32x2xf32>, %arg9: tensor<2x128xf32>):
%16 = affine.apply affine_map<(d0, d1) -> (d0 * 8 + d1)>(%arg3, %arg4)
%17 = scf.for %arg10 = %16 to %c32 step %c256 iter_args(%arg11 = %arg8) -> (tensor<32x2xf32>) {
%23 = affine.apply affine_map<(d0)[s0] -> (d0 * 32 + s0)>(%arg0)[%arg10]
%extracted_slice_5 = tensor.extract_slice %3[%23, %arg6] [1, 2] [1, 1] : tensor<128x258xf32> to tensor<1x2xf32>
%extracted_slice_6 = tensor.extract_slice %arg11[%arg10, 0] [1, 2] [1, 1] : tensor<32x2xf32> to tensor<1x2xf32>
%24 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_5 : tensor<1x2xf32>) outs(%extracted_slice_6 : tensor<1x2xf32>) -> tensor<1x2xf32>
%inserted_slice_7 = tensor.insert_slice %24 into %arg11[%arg10, 0] [1, 2] [1, 1] : tensor<1x2xf32> into tensor<32x2xf32>
scf.yield %inserted_slice_7 : tensor<32x2xf32>
} {unroll_loop}
%18 = affine.apply affine_map<(d0, d1) -> (d0 * 8 + d1)>(%arg3, %arg4)
%19:2 = affine.delinearize_index %18 into (2, 128) : index, index
%20 = affine.apply affine_map<()[s0, s1] -> (s0 + s1)>()[%19#0, %arg6]
%21 = affine.apply affine_map<(d0)[s0] -> (d0 * 128 + s0)>(%arg1)[%19#1]
%extracted_slice_3 = tensor.extract_slice %4[%20, %21] [1, 1] [1, 1] : tensor<258x256xf32> to tensor<1x1xf32>
%extracted_slice_4 = tensor.extract_slice %arg9[%19#0, %19#1] [1, 1] [1, 1] : tensor<2x128xf32> to tensor<1x1xf32>
%22 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_3 : tensor<1x1xf32>) outs(%extracted_slice_4 : tensor<1x1xf32>) -> tensor<1x1xf32>
%inserted_slice = tensor.insert_slice %22 into %arg9[%19#0, %19#1] [1, 1] [1, 1] : tensor<1x1xf32> into tensor<2x128xf32>
iree_gpu.yield %17, %inserted_slice : tensor<32x2xf32>, tensor<2x128xf32>
} : tensor<32x2xf32>, tensor<2x128xf32>
%extracted_slice_1 = tensor.extract_slice %14#0[%arg3, 0] [1, 2] [1, 1] : tensor<32x2xf32> to tensor<1x2xf32>
%extracted_slice_2 = tensor.extract_slice %14#1[0, %12] [2, 16] [1, 1] : tensor<2x128xf32> to tensor<2x16xf32>
%15 = linalg.matmul {lowering_config = #iree_gpu.lowering_config<{promote_operands = [0, 1], reduction = [0, 0, 2], thread = [1, 16, 0], workgroup = [32, 128, 1]}>} ins(%extracted_slice_1, %extracted_slice_2 : tensor<1x2xf32>, tensor<2x16xf32>) outs(%arg7 : tensor<1x16xf32>) -> tensor<1x16xf32>
scf.yield %15 : tensor<1x16xf32>
}
scf.forall.in_parallel {
tensor.parallel_insert_slice %13 into %arg5[%arg3, %12] [1, 16] [1, 1] : tensor<1x16xf32> into tensor<32x128xf32>
}
} {mapping = [#gpu.thread<linear_dim_1>, #gpu.thread<linear_dim_0>]}
scf.forall.in_parallel {
tensor.parallel_insert_slice %11 into %arg2[%8, %7] [32, 128] [1, 1] : tensor<32x128xf32> into tensor<128x256xf32>
}
} {mapping = [#iree_codegen.workgroup_mapping<y>, #iree_codegen.workgroup_mapping<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
// -----// IR Dump After DecomposeConvolutionToLowerDimOpsPass (iree-codegen-decompose-convolution-to-lower-dim-ops) //----- //
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32() attributes {translation_info = #iree_codegen.translation_info<pipeline = LLVMGPUTileAndFuse workgroup_size = [32, 8, 1] subgroup_size = 32, {gpu_pipeline_options = #iree_gpu.pipeline_options<prefetch_shared_memory = false, no_reduce_shared_memory_bank_conflicts = true, use_igemm_convolution = false>}>} {
%c256 = arith.constant 256 : index
%c32 = arith.constant 32 : index
%c2 = arith.constant 2 : index
%c258 = arith.constant 258 : index
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = scf.forall (%arg0, %arg1) in (4, 2) shared_outs(%arg2 = %5) -> (tensor<128x256xf32>) {
%7 = affine.apply affine_map<(d0) -> (d0 * 128)>(%arg1)
%8 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg0)
%extracted_slice = tensor.extract_slice %arg2[%8, %7] [32, 128] [1, 1] : tensor<128x256xf32> to tensor<32x128xf32>
%9 = bufferization.alloc_tensor() {memory_space = #gpu.address_space<workgroup>} : tensor<32x2xf32>
%10 = bufferization.alloc_tensor() {memory_space = #gpu.address_space<workgroup>} : tensor<2x128xf32>
%11 = scf.forall (%arg3, %arg4) in (32, 8) shared_outs(%arg5 = %extracted_slice) -> (tensor<32x128xf32>) {
%12 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg4)
%extracted_slice_0 = tensor.extract_slice %arg5[%arg3, %12] [1, 16] [1, 1] : tensor<32x128xf32> to tensor<1x16xf32>
%13 = scf.for %arg6 = %c0 to %c258 step %c2 iter_args(%arg7 = %extracted_slice_0) -> (tensor<1x16xf32>) {
%14:2 = iree_gpu.barrier_region ins(%9, %10 : tensor<32x2xf32>, tensor<2x128xf32>) {
^bb0(%arg8: tensor<32x2xf32>, %arg9: tensor<2x128xf32>):
%16 = affine.apply affine_map<(d0, d1) -> (d0 * 8 + d1)>(%arg3, %arg4)
%17 = scf.for %arg10 = %16 to %c32 step %c256 iter_args(%arg11 = %arg8) -> (tensor<32x2xf32>) {
%23 = affine.apply affine_map<(d0)[s0] -> (d0 * 32 + s0)>(%arg0)[%arg10]
%extracted_slice_5 = tensor.extract_slice %3[%23, %arg6] [1, 2] [1, 1] : tensor<128x258xf32> to tensor<1x2xf32>
%extracted_slice_6 = tensor.extract_slice %arg11[%arg10, 0] [1, 2] [1, 1] : tensor<32x2xf32> to tensor<1x2xf32>
%24 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_5 : tensor<1x2xf32>) outs(%extracted_slice_6 : tensor<1x2xf32>) -> tensor<1x2xf32>
%inserted_slice_7 = tensor.insert_slice %24 into %arg11[%arg10, 0] [1, 2] [1, 1] : tensor<1x2xf32> into tensor<32x2xf32>
scf.yield %inserted_slice_7 : tensor<32x2xf32>
} {unroll_loop}
%18 = affine.apply affine_map<(d0, d1) -> (d0 * 8 + d1)>(%arg3, %arg4)
%19:2 = affine.delinearize_index %18 into (2, 128) : index, index
%20 = affine.apply affine_map<()[s0, s1] -> (s0 + s1)>()[%19#0, %arg6]
%21 = affine.apply affine_map<(d0)[s0] -> (d0 * 128 + s0)>(%arg1)[%19#1]
%extracted_slice_3 = tensor.extract_slice %4[%20, %21] [1, 1] [1, 1] : tensor<258x256xf32> to tensor<1x1xf32>
%extracted_slice_4 = tensor.extract_slice %arg9[%19#0, %19#1] [1, 1] [1, 1] : tensor<2x128xf32> to tensor<1x1xf32>
%22 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_3 : tensor<1x1xf32>) outs(%extracted_slice_4 : tensor<1x1xf32>) -> tensor<1x1xf32>
%inserted_slice = tensor.insert_slice %22 into %arg9[%19#0, %19#1] [1, 1] [1, 1] : tensor<1x1xf32> into tensor<2x128xf32>
iree_gpu.yield %17, %inserted_slice : tensor<32x2xf32>, tensor<2x128xf32>
} : tensor<32x2xf32>, tensor<2x128xf32>
%extracted_slice_1 = tensor.extract_slice %14#0[%arg3, 0] [1, 2] [1, 1] : tensor<32x2xf32> to tensor<1x2xf32>
%extracted_slice_2 = tensor.extract_slice %14#1[0, %12] [2, 16] [1, 1] : tensor<2x128xf32> to tensor<2x16xf32>
%15 = linalg.matmul {lowering_config = #iree_gpu.lowering_config<{promote_operands = [0, 1], reduction = [0, 0, 2], thread = [1, 16, 0], workgroup = [32, 128, 1]}>} ins(%extracted_slice_1, %extracted_slice_2 : tensor<1x2xf32>, tensor<2x16xf32>) outs(%arg7 : tensor<1x16xf32>) -> tensor<1x16xf32>
scf.yield %15 : tensor<1x16xf32>
}
scf.forall.in_parallel {
tensor.parallel_insert_slice %13 into %arg5[%arg3, %12] [1, 16] [1, 1] : tensor<1x16xf32> into tensor<32x128xf32>
}
} {mapping = [#gpu.thread<linear_dim_1>, #gpu.thread<linear_dim_0>]}
scf.forall.in_parallel {
tensor.parallel_insert_slice %11 into %arg2[%8, %7] [32, 128] [1, 1] : tensor<32x128xf32> into tensor<128x256xf32>
}
} {mapping = [#iree_codegen.workgroup_mapping<y>, #iree_codegen.workgroup_mapping<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
// -----// IR Dump After DecomposeIm2colPass (iree-linalg-ext-decompose-im2col) //----- //
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32() attributes {translation_info = #iree_codegen.translation_info<pipeline = LLVMGPUTileAndFuse workgroup_size = [32, 8, 1] subgroup_size = 32, {gpu_pipeline_options = #iree_gpu.pipeline_options<prefetch_shared_memory = false, no_reduce_shared_memory_bank_conflicts = true, use_igemm_convolution = false>}>} {
%c256 = arith.constant 256 : index
%c32 = arith.constant 32 : index
%c2 = arith.constant 2 : index
%c258 = arith.constant 258 : index
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = scf.forall (%arg0, %arg1) in (4, 2) shared_outs(%arg2 = %5) -> (tensor<128x256xf32>) {
%7 = affine.apply affine_map<(d0) -> (d0 * 128)>(%arg1)
%8 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg0)
%extracted_slice = tensor.extract_slice %arg2[%8, %7] [32, 128] [1, 1] : tensor<128x256xf32> to tensor<32x128xf32>
%9 = bufferization.alloc_tensor() {memory_space = #gpu.address_space<workgroup>} : tensor<32x2xf32>
%10 = bufferization.alloc_tensor() {memory_space = #gpu.address_space<workgroup>} : tensor<2x128xf32>
%11 = scf.forall (%arg3, %arg4) in (32, 8) shared_outs(%arg5 = %extracted_slice) -> (tensor<32x128xf32>) {
%12 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg4)
%extracted_slice_0 = tensor.extract_slice %arg5[%arg3, %12] [1, 16] [1, 1] : tensor<32x128xf32> to tensor<1x16xf32>
%13 = scf.for %arg6 = %c0 to %c258 step %c2 iter_args(%arg7 = %extracted_slice_0) -> (tensor<1x16xf32>) {
%14:2 = iree_gpu.barrier_region ins(%9, %10 : tensor<32x2xf32>, tensor<2x128xf32>) {
^bb0(%arg8: tensor<32x2xf32>, %arg9: tensor<2x128xf32>):
%16 = affine.apply affine_map<(d0, d1) -> (d0 * 8 + d1)>(%arg3, %arg4)
%17 = scf.for %arg10 = %16 to %c32 step %c256 iter_args(%arg11 = %arg8) -> (tensor<32x2xf32>) {
%23 = affine.apply affine_map<(d0)[s0] -> (d0 * 32 + s0)>(%arg0)[%arg10]
%extracted_slice_5 = tensor.extract_slice %3[%23, %arg6] [1, 2] [1, 1] : tensor<128x258xf32> to tensor<1x2xf32>
%extracted_slice_6 = tensor.extract_slice %arg11[%arg10, 0] [1, 2] [1, 1] : tensor<32x2xf32> to tensor<1x2xf32>
%24 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_5 : tensor<1x2xf32>) outs(%extracted_slice_6 : tensor<1x2xf32>) -> tensor<1x2xf32>
%inserted_slice_7 = tensor.insert_slice %24 into %arg11[%arg10, 0] [1, 2] [1, 1] : tensor<1x2xf32> into tensor<32x2xf32>
scf.yield %inserted_slice_7 : tensor<32x2xf32>
} {unroll_loop}
%18 = affine.apply affine_map<(d0, d1) -> (d0 * 8 + d1)>(%arg3, %arg4)
%19:2 = affine.delinearize_index %18 into (2, 128) : index, index
%20 = affine.apply affine_map<()[s0, s1] -> (s0 + s1)>()[%19#0, %arg6]
%21 = affine.apply affine_map<(d0)[s0] -> (d0 * 128 + s0)>(%arg1)[%19#1]
%extracted_slice_3 = tensor.extract_slice %4[%20, %21] [1, 1] [1, 1] : tensor<258x256xf32> to tensor<1x1xf32>
%extracted_slice_4 = tensor.extract_slice %arg9[%19#0, %19#1] [1, 1] [1, 1] : tensor<2x128xf32> to tensor<1x1xf32>
%22 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_3 : tensor<1x1xf32>) outs(%extracted_slice_4 : tensor<1x1xf32>) -> tensor<1x1xf32>
%inserted_slice = tensor.insert_slice %22 into %arg9[%19#0, %19#1] [1, 1] [1, 1] : tensor<1x1xf32> into tensor<2x128xf32>
iree_gpu.yield %17, %inserted_slice : tensor<32x2xf32>, tensor<2x128xf32>
} : tensor<32x2xf32>, tensor<2x128xf32>
%extracted_slice_1 = tensor.extract_slice %14#0[%arg3, 0] [1, 2] [1, 1] : tensor<32x2xf32> to tensor<1x2xf32>
%extracted_slice_2 = tensor.extract_slice %14#1[0, %12] [2, 16] [1, 1] : tensor<2x128xf32> to tensor<2x16xf32>
%15 = linalg.matmul {lowering_config = #iree_gpu.lowering_config<{promote_operands = [0, 1], reduction = [0, 0, 2], thread = [1, 16, 0], workgroup = [32, 128, 1]}>} ins(%extracted_slice_1, %extracted_slice_2 : tensor<1x2xf32>, tensor<2x16xf32>) outs(%arg7 : tensor<1x16xf32>) -> tensor<1x16xf32>
scf.yield %15 : tensor<1x16xf32>
}
scf.forall.in_parallel {
tensor.parallel_insert_slice %13 into %arg5[%arg3, %12] [1, 16] [1, 1] : tensor<1x16xf32> into tensor<32x128xf32>
}
} {mapping = [#gpu.thread<linear_dim_1>, #gpu.thread<linear_dim_0>]}
scf.forall.in_parallel {
tensor.parallel_insert_slice %11 into %arg2[%8, %7] [32, 128] [1, 1] : tensor<32x128xf32> into tensor<128x256xf32>
}
} {mapping = [#iree_codegen.workgroup_mapping<y>, #iree_codegen.workgroup_mapping<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
// -----// IR Dump After VectorizeIREEVectorExtOpsPass (iree-vector-ext-vectorize-ops) //----- //
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32() attributes {translation_info = #iree_codegen.translation_info<pipeline = LLVMGPUTileAndFuse workgroup_size = [32, 8, 1] subgroup_size = 32, {gpu_pipeline_options = #iree_gpu.pipeline_options<prefetch_shared_memory = false, no_reduce_shared_memory_bank_conflicts = true, use_igemm_convolution = false>}>} {
%c256 = arith.constant 256 : index
%c32 = arith.constant 32 : index
%c2 = arith.constant 2 : index
%c258 = arith.constant 258 : index
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = scf.forall (%arg0, %arg1) in (4, 2) shared_outs(%arg2 = %5) -> (tensor<128x256xf32>) {
%7 = affine.apply affine_map<(d0) -> (d0 * 128)>(%arg1)
%8 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg0)
%extracted_slice = tensor.extract_slice %arg2[%8, %7] [32, 128] [1, 1] : tensor<128x256xf32> to tensor<32x128xf32>
%9 = bufferization.alloc_tensor() {memory_space = #gpu.address_space<workgroup>} : tensor<32x2xf32>
%10 = bufferization.alloc_tensor() {memory_space = #gpu.address_space<workgroup>} : tensor<2x128xf32>
%11 = scf.forall (%arg3, %arg4) in (32, 8) shared_outs(%arg5 = %extracted_slice) -> (tensor<32x128xf32>) {
%12 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg4)
%extracted_slice_0 = tensor.extract_slice %arg5[%arg3, %12] [1, 16] [1, 1] : tensor<32x128xf32> to tensor<1x16xf32>
%13 = scf.for %arg6 = %c0 to %c258 step %c2 iter_args(%arg7 = %extracted_slice_0) -> (tensor<1x16xf32>) {
%14:2 = iree_gpu.barrier_region ins(%9, %10 : tensor<32x2xf32>, tensor<2x128xf32>) {
^bb0(%arg8: tensor<32x2xf32>, %arg9: tensor<2x128xf32>):
%16 = affine.apply affine_map<(d0, d1) -> (d0 * 8 + d1)>(%arg3, %arg4)
%17 = scf.for %arg10 = %16 to %c32 step %c256 iter_args(%arg11 = %arg8) -> (tensor<32x2xf32>) {
%23 = affine.apply affine_map<(d0)[s0] -> (d0 * 32 + s0)>(%arg0)[%arg10]
%extracted_slice_5 = tensor.extract_slice %3[%23, %arg6] [1, 2] [1, 1] : tensor<128x258xf32> to tensor<1x2xf32>
%extracted_slice_6 = tensor.extract_slice %arg11[%arg10, 0] [1, 2] [1, 1] : tensor<32x2xf32> to tensor<1x2xf32>
%24 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_5 : tensor<1x2xf32>) outs(%extracted_slice_6 : tensor<1x2xf32>) -> tensor<1x2xf32>
%inserted_slice_7 = tensor.insert_slice %24 into %arg11[%arg10, 0] [1, 2] [1, 1] : tensor<1x2xf32> into tensor<32x2xf32>
scf.yield %inserted_slice_7 : tensor<32x2xf32>
} {unroll_loop}
%18 = affine.apply affine_map<(d0, d1) -> (d0 * 8 + d1)>(%arg3, %arg4)
%19:2 = affine.delinearize_index %18 into (2, 128) : index, index
%20 = affine.apply affine_map<()[s0, s1] -> (s0 + s1)>()[%19#0, %arg6]
%21 = affine.apply affine_map<(d0)[s0] -> (d0 * 128 + s0)>(%arg1)[%19#1]
%extracted_slice_3 = tensor.extract_slice %4[%20, %21] [1, 1] [1, 1] : tensor<258x256xf32> to tensor<1x1xf32>
%extracted_slice_4 = tensor.extract_slice %arg9[%19#0, %19#1] [1, 1] [1, 1] : tensor<2x128xf32> to tensor<1x1xf32>
%22 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_3 : tensor<1x1xf32>) outs(%extracted_slice_4 : tensor<1x1xf32>) -> tensor<1x1xf32>
%inserted_slice = tensor.insert_slice %22 into %arg9[%19#0, %19#1] [1, 1] [1, 1] : tensor<1x1xf32> into tensor<2x128xf32>
iree_gpu.yield %17, %inserted_slice : tensor<32x2xf32>, tensor<2x128xf32>
} : tensor<32x2xf32>, tensor<2x128xf32>
%extracted_slice_1 = tensor.extract_slice %14#0[%arg3, 0] [1, 2] [1, 1] : tensor<32x2xf32> to tensor<1x2xf32>
%extracted_slice_2 = tensor.extract_slice %14#1[0, %12] [2, 16] [1, 1] : tensor<2x128xf32> to tensor<2x16xf32>
%15 = linalg.matmul {lowering_config = #iree_gpu.lowering_config<{promote_operands = [0, 1], reduction = [0, 0, 2], thread = [1, 16, 0], workgroup = [32, 128, 1]}>} ins(%extracted_slice_1, %extracted_slice_2 : tensor<1x2xf32>, tensor<2x16xf32>) outs(%arg7 : tensor<1x16xf32>) -> tensor<1x16xf32>
scf.yield %15 : tensor<1x16xf32>
}
scf.forall.in_parallel {
tensor.parallel_insert_slice %13 into %arg5[%arg3, %12] [1, 16] [1, 1] : tensor<1x16xf32> into tensor<32x128xf32>
}
} {mapping = [#gpu.thread<linear_dim_1>, #gpu.thread<linear_dim_0>]}
scf.forall.in_parallel {
tensor.parallel_insert_slice %11 into %arg2[%8, %7] [32, 128] [1, 1] : tensor<32x128xf32> into tensor<128x256xf32>
}
} {mapping = [#iree_codegen.workgroup_mapping<y>, #iree_codegen.workgroup_mapping<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
// -----// IR Dump After GenericVectorizationPass (iree-codegen-generic-vectorization) //----- //
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32() attributes {translation_info = #iree_codegen.translation_info<pipeline = LLVMGPUTileAndFuse workgroup_size = [32, 8, 1] subgroup_size = 32, {gpu_pipeline_options = #iree_gpu.pipeline_options<prefetch_shared_memory = false, no_reduce_shared_memory_bank_conflicts = true, use_igemm_convolution = false>}>} {
%cst = arith.constant 0.000000e+00 : f32
%c256 = arith.constant 256 : index
%c32 = arith.constant 32 : index
%c2 = arith.constant 2 : index
%c258 = arith.constant 258 : index
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = scf.forall (%arg0, %arg1) in (4, 2) shared_outs(%arg2 = %5) -> (tensor<128x256xf32>) {
%7 = affine.apply affine_map<(d0) -> (d0 * 128)>(%arg1)
%8 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg0)
%extracted_slice = tensor.extract_slice %arg2[%8, %7] [32, 128] [1, 1] : tensor<128x256xf32> to tensor<32x128xf32>
%9 = bufferization.alloc_tensor() {memory_space = #gpu.address_space<workgroup>} : tensor<32x2xf32>
%10 = bufferization.alloc_tensor() {memory_space = #gpu.address_space<workgroup>} : tensor<2x128xf32>
%11 = scf.forall (%arg3, %arg4) in (32, 8) shared_outs(%arg5 = %extracted_slice) -> (tensor<32x128xf32>) {
%12 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg4)
%extracted_slice_0 = tensor.extract_slice %arg5[%arg3, %12] [1, 16] [1, 1] : tensor<32x128xf32> to tensor<1x16xf32>
%13 = scf.for %arg6 = %c0 to %c258 step %c2 iter_args(%arg7 = %extracted_slice_0) -> (tensor<1x16xf32>) {
%14:2 = iree_gpu.barrier_region ins(%9, %10 : tensor<32x2xf32>, tensor<2x128xf32>) {
^bb0(%arg8: tensor<32x2xf32>, %arg9: tensor<2x128xf32>):
%20 = affine.apply affine_map<(d0, d1) -> (d0 * 8 + d1)>(%arg3, %arg4)
%21 = scf.for %arg10 = %20 to %c32 step %c256 iter_args(%arg11 = %arg8) -> (tensor<32x2xf32>) {
%27 = affine.apply affine_map<(d0)[s0] -> (d0 * 32 + s0)>(%arg0)[%arg10]
%extracted_slice_5 = tensor.extract_slice %3[%27, %arg6] [1, 2] [1, 1] : tensor<128x258xf32> to tensor<1x2xf32>
%extracted_slice_6 = tensor.extract_slice %arg11[%arg10, 0] [1, 2] [1, 1] : tensor<32x2xf32> to tensor<1x2xf32>
%28 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_5 : tensor<1x2xf32>) outs(%extracted_slice_6 : tensor<1x2xf32>) -> tensor<1x2xf32>
%inserted_slice_7 = tensor.insert_slice %28 into %arg11[%arg10, 0] [1, 2] [1, 1] : tensor<1x2xf32> into tensor<32x2xf32>
scf.yield %inserted_slice_7 : tensor<32x2xf32>
} {unroll_loop}
%22 = affine.apply affine_map<(d0, d1) -> (d0 * 8 + d1)>(%arg3, %arg4)
%23:2 = affine.delinearize_index %22 into (2, 128) : index, index
%24 = affine.apply affine_map<()[s0, s1] -> (s0 + s1)>()[%23#0, %arg6]
%25 = affine.apply affine_map<(d0)[s0] -> (d0 * 128 + s0)>(%arg1)[%23#1]
%extracted_slice_3 = tensor.extract_slice %4[%24, %25] [1, 1] [1, 1] : tensor<258x256xf32> to tensor<1x1xf32>
%extracted_slice_4 = tensor.extract_slice %arg9[%23#0, %23#1] [1, 1] [1, 1] : tensor<2x128xf32> to tensor<1x1xf32>
%26 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_3 : tensor<1x1xf32>) outs(%extracted_slice_4 : tensor<1x1xf32>) -> tensor<1x1xf32>
%inserted_slice = tensor.insert_slice %26 into %arg9[%23#0, %23#1] [1, 1] [1, 1] : tensor<1x1xf32> into tensor<2x128xf32>
iree_gpu.yield %21, %inserted_slice : tensor<32x2xf32>, tensor<2x128xf32>
} : tensor<32x2xf32>, tensor<2x128xf32>
%extracted_slice_1 = tensor.extract_slice %14#0[%arg3, 0] [1, 2] [1, 1] : tensor<32x2xf32> to tensor<1x2xf32>
%extracted_slice_2 = tensor.extract_slice %14#1[0, %12] [2, 16] [1, 1] : tensor<2x128xf32> to tensor<2x16xf32>
%15 = vector.transfer_read %extracted_slice_1[%c0, %c0], %cst {in_bounds = [true, true]} : tensor<1x2xf32>, vector<1x2xf32>
%16 = vector.transfer_read %extracted_slice_2[%c0, %c0], %cst {in_bounds = [true, true]} : tensor<2x16xf32>, vector<2x16xf32>
%17 = vector.transfer_read %arg7[%c0, %c0], %cst {in_bounds = [true, true]} : tensor<1x16xf32>, vector<1x16xf32>
%18 = vector.contract {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"], kind = #vector.kind<add>} %15, %16, %17 : vector<1x2xf32>, vector<2x16xf32> into vector<1x16xf32>
%19 = vector.transfer_write %18, %arg7[%c0, %c0] {in_bounds = [true, true]} : vector<1x16xf32>, tensor<1x16xf32>
scf.yield %19 : tensor<1x16xf32>
}
scf.forall.in_parallel {
tensor.parallel_insert_slice %13 into %arg5[%arg3, %12] [1, 16] [1, 1] : tensor<1x16xf32> into tensor<32x128xf32>
}
} {mapping = [#gpu.thread<linear_dim_1>, #gpu.thread<linear_dim_0>]}
scf.forall.in_parallel {
tensor.parallel_insert_slice %11 into %arg2[%8, %7] [32, 128] [1, 1] : tensor<32x128xf32> into tensor<128x256xf32>
}
} {mapping = [#iree_codegen.workgroup_mapping<y>, #iree_codegen.workgroup_mapping<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32() attributes {translation_info = #iree_codegen.translation_info<pipeline = LLVMGPUTileAndFuse workgroup_size = [32, 8, 1] subgroup_size = 32, {gpu_pipeline_options = #iree_gpu.pipeline_options<prefetch_shared_memory = false, no_reduce_shared_memory_bank_conflicts = true, use_igemm_convolution = false>}>} {
%cst = arith.constant 0.000000e+00 : f32
%c256 = arith.constant 256 : index
%c32 = arith.constant 32 : index
%c2 = arith.constant 2 : index
%c258 = arith.constant 258 : index
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = scf.forall (%arg0, %arg1) in (4, 2) shared_outs(%arg2 = %5) -> (tensor<128x256xf32>) {
%7 = affine.apply affine_map<(d0) -> (d0 * 128)>(%arg1)
%8 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg0)
%extracted_slice = tensor.extract_slice %arg2[%8, %7] [32, 128] [1, 1] : tensor<128x256xf32> to tensor<32x128xf32>
%9 = bufferization.alloc_tensor() {memory_space = #gpu.address_space<workgroup>} : tensor<32x2xf32>
%10 = bufferization.alloc_tensor() {memory_space = #gpu.address_space<workgroup>} : tensor<2x128xf32>
%11 = scf.forall (%arg3, %arg4) in (32, 8) shared_outs(%arg5 = %extracted_slice) -> (tensor<32x128xf32>) {
%12 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg4)
%extracted_slice_0 = tensor.extract_slice %arg5[%arg3, %12] [1, 16] [1, 1] : tensor<32x128xf32> to tensor<1x16xf32>
%13 = scf.for %arg6 = %c0 to %c258 step %c2 iter_args(%arg7 = %extracted_slice_0) -> (tensor<1x16xf32>) {
%14:2 = iree_gpu.barrier_region ins(%9, %10 : tensor<32x2xf32>, tensor<2x128xf32>) {
^bb0(%arg8: tensor<32x2xf32>, %arg9: tensor<2x128xf32>):
%20 = affine.apply affine_map<(d0, d1) -> (d0 * 8 + d1)>(%arg3, %arg4)
%21 = scf.for %arg10 = %20 to %c32 step %c256 iter_args(%arg11 = %arg8) -> (tensor<32x2xf32>) {
%27 = affine.apply affine_map<(d0)[s0] -> (d0 * 32 + s0)>(%arg0)[%arg10]
%extracted_slice_5 = tensor.extract_slice %3[%27, %arg6] [1, 2] [1, 1] : tensor<128x258xf32> to tensor<1x2xf32>
%extracted_slice_6 = tensor.extract_slice %arg11[%arg10, 0] [1, 2] [1, 1] : tensor<32x2xf32> to tensor<1x2xf32>
%28 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_5 : tensor<1x2xf32>) outs(%extracted_slice_6 : tensor<1x2xf32>) -> tensor<1x2xf32>
%inserted_slice_7 = tensor.insert_slice %28 into %arg11[%arg10, 0] [1, 2] [1, 1] : tensor<1x2xf32> into tensor<32x2xf32>
scf.yield %inserted_slice_7 : tensor<32x2xf32>
} {unroll_loop}
%22 = affine.apply affine_map<(d0, d1) -> (d0 * 8 + d1)>(%arg3, %arg4)
%23:2 = affine.delinearize_index %22 into (2, 128) : index, index
%24 = affine.apply affine_map<()[s0, s1] -> (s0 + s1)>()[%23#0, %arg6]
%25 = affine.apply affine_map<(d0)[s0] -> (d0 * 128 + s0)>(%arg1)[%23#1]
%extracted_slice_3 = tensor.extract_slice %4[%24, %25] [1, 1] [1, 1] : tensor<258x256xf32> to tensor<1x1xf32>
%extracted_slice_4 = tensor.extract_slice %arg9[%23#0, %23#1] [1, 1] [1, 1] : tensor<2x128xf32> to tensor<1x1xf32>
%26 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_3 : tensor<1x1xf32>) outs(%extracted_slice_4 : tensor<1x1xf32>) -> tensor<1x1xf32>
%inserted_slice = tensor.insert_slice %26 into %arg9[%23#0, %23#1] [1, 1] [1, 1] : tensor<1x1xf32> into tensor<2x128xf32>
iree_gpu.yield %21, %inserted_slice : tensor<32x2xf32>, tensor<2x128xf32>
} : tensor<32x2xf32>, tensor<2x128xf32>
%extracted_slice_1 = tensor.extract_slice %14#0[%arg3, 0] [1, 2] [1, 1] : tensor<32x2xf32> to tensor<1x2xf32>
%extracted_slice_2 = tensor.extract_slice %14#1[0, %12] [2, 16] [1, 1] : tensor<2x128xf32> to tensor<2x16xf32>
%15 = vector.transfer_read %extracted_slice_1[%c0, %c0], %cst {in_bounds = [true, true]} : tensor<1x2xf32>, vector<1x2xf32>
%16 = vector.transfer_read %extracted_slice_2[%c0, %c0], %cst {in_bounds = [true, true]} : tensor<2x16xf32>, vector<2x16xf32>
%17 = vector.transfer_read %arg7[%c0, %c0], %cst {in_bounds = [true, true]} : tensor<1x16xf32>, vector<1x16xf32>
%18 = vector.contract {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"], kind = #vector.kind<add>} %15, %16, %17 : vector<1x2xf32>, vector<2x16xf32> into vector<1x16xf32>
%19 = vector.transfer_write %18, %arg7[%c0, %c0] {in_bounds = [true, true]} : vector<1x16xf32>, tensor<1x16xf32>
scf.yield %19 : tensor<1x16xf32>
}
scf.forall.in_parallel {
tensor.parallel_insert_slice %13 into %arg5[%arg3, %12] [1, 16] [1, 1] : tensor<1x16xf32> into tensor<32x128xf32>
}
} {mapping = [#gpu.thread<linear_dim_1>, #gpu.thread<linear_dim_0>]}
scf.forall.in_parallel {
tensor.parallel_insert_slice %11 into %arg2[%8, %7] [32, 128] [1, 1] : tensor<32x128xf32> into tensor<128x256xf32>
}
} {mapping = [#iree_codegen.workgroup_mapping<y>, #iree_codegen.workgroup_mapping<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
// -----// IR Dump After CSE (cse) //----- //
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32() attributes {translation_info = #iree_codegen.translation_info<pipeline = LLVMGPUTileAndFuse workgroup_size = [32, 8, 1] subgroup_size = 32, {gpu_pipeline_options = #iree_gpu.pipeline_options<prefetch_shared_memory = false, no_reduce_shared_memory_bank_conflicts = true, use_igemm_convolution = false>}>} {
%cst = arith.constant 0.000000e+00 : f32
%c256 = arith.constant 256 : index
%c32 = arith.constant 32 : index
%c2 = arith.constant 2 : index
%c258 = arith.constant 258 : index
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = scf.forall (%arg0, %arg1) in (4, 2) shared_outs(%arg2 = %5) -> (tensor<128x256xf32>) {
%7 = affine.apply affine_map<(d0) -> (d0 * 128)>(%arg1)
%8 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg0)
%extracted_slice = tensor.extract_slice %arg2[%8, %7] [32, 128] [1, 1] : tensor<128x256xf32> to tensor<32x128xf32>
%9 = bufferization.alloc_tensor() {memory_space = #gpu.address_space<workgroup>} : tensor<32x2xf32>
%10 = bufferization.alloc_tensor() {memory_space = #gpu.address_space<workgroup>} : tensor<2x128xf32>
%11 = scf.forall (%arg3, %arg4) in (32, 8) shared_outs(%arg5 = %extracted_slice) -> (tensor<32x128xf32>) {
%12 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg4)
%extracted_slice_0 = tensor.extract_slice %arg5[%arg3, %12] [1, 16] [1, 1] : tensor<32x128xf32> to tensor<1x16xf32>
%13 = scf.for %arg6 = %c0 to %c258 step %c2 iter_args(%arg7 = %extracted_slice_0) -> (tensor<1x16xf32>) {
%14:2 = iree_gpu.barrier_region ins(%9, %10 : tensor<32x2xf32>, tensor<2x128xf32>) {
^bb0(%arg8: tensor<32x2xf32>, %arg9: tensor<2x128xf32>):
%20 = affine.apply affine_map<(d0, d1) -> (d0 * 8 + d1)>(%arg3, %arg4)
%21 = scf.for %arg10 = %20 to %c32 step %c256 iter_args(%arg11 = %arg8) -> (tensor<32x2xf32>) {
%26 = affine.apply affine_map<(d0)[s0] -> (d0 * 32 + s0)>(%arg0)[%arg10]
%extracted_slice_5 = tensor.extract_slice %3[%26, %arg6] [1, 2] [1, 1] : tensor<128x258xf32> to tensor<1x2xf32>
%extracted_slice_6 = tensor.extract_slice %arg11[%arg10, 0] [1, 2] [1, 1] : tensor<32x2xf32> to tensor<1x2xf32>
%27 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_5 : tensor<1x2xf32>) outs(%extracted_slice_6 : tensor<1x2xf32>) -> tensor<1x2xf32>
%inserted_slice_7 = tensor.insert_slice %27 into %arg11[%arg10, 0] [1, 2] [1, 1] : tensor<1x2xf32> into tensor<32x2xf32>
scf.yield %inserted_slice_7 : tensor<32x2xf32>
} {unroll_loop}
%22:2 = affine.delinearize_index %20 into (2, 128) : index, index
%23 = affine.apply affine_map<()[s0, s1] -> (s0 + s1)>()[%22#0, %arg6]
%24 = affine.apply affine_map<(d0)[s0] -> (d0 * 128 + s0)>(%arg1)[%22#1]
%extracted_slice_3 = tensor.extract_slice %4[%23, %24] [1, 1] [1, 1] : tensor<258x256xf32> to tensor<1x1xf32>
%extracted_slice_4 = tensor.extract_slice %arg9[%22#0, %22#1] [1, 1] [1, 1] : tensor<2x128xf32> to tensor<1x1xf32>
%25 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_3 : tensor<1x1xf32>) outs(%extracted_slice_4 : tensor<1x1xf32>) -> tensor<1x1xf32>
%inserted_slice = tensor.insert_slice %25 into %arg9[%22#0, %22#1] [1, 1] [1, 1] : tensor<1x1xf32> into tensor<2x128xf32>
iree_gpu.yield %21, %inserted_slice : tensor<32x2xf32>, tensor<2x128xf32>
} : tensor<32x2xf32>, tensor<2x128xf32>
%extracted_slice_1 = tensor.extract_slice %14#0[%arg3, 0] [1, 2] [1, 1] : tensor<32x2xf32> to tensor<1x2xf32>
%extracted_slice_2 = tensor.extract_slice %14#1[0, %12] [2, 16] [1, 1] : tensor<2x128xf32> to tensor<2x16xf32>
%15 = vector.transfer_read %extracted_slice_1[%c0, %c0], %cst {in_bounds = [true, true]} : tensor<1x2xf32>, vector<1x2xf32>
%16 = vector.transfer_read %extracted_slice_2[%c0, %c0], %cst {in_bounds = [true, true]} : tensor<2x16xf32>, vector<2x16xf32>
%17 = vector.transfer_read %arg7[%c0, %c0], %cst {in_bounds = [true, true]} : tensor<1x16xf32>, vector<1x16xf32>
%18 = vector.contract {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"], kind = #vector.kind<add>} %15, %16, %17 : vector<1x2xf32>, vector<2x16xf32> into vector<1x16xf32>
%19 = vector.transfer_write %18, %arg7[%c0, %c0] {in_bounds = [true, true]} : vector<1x16xf32>, tensor<1x16xf32>
scf.yield %19 : tensor<1x16xf32>
}
scf.forall.in_parallel {
tensor.parallel_insert_slice %13 into %arg5[%arg3, %12] [1, 16] [1, 1] : tensor<1x16xf32> into tensor<32x128xf32>
}
} {mapping = [#gpu.thread<linear_dim_1>, #gpu.thread<linear_dim_0>]}
scf.forall.in_parallel {
tensor.parallel_insert_slice %11 into %arg2[%8, %7] [32, 128] [1, 1] : tensor<32x128xf32> into tensor<128x256xf32>
}
} {mapping = [#iree_codegen.workgroup_mapping<y>, #iree_codegen.workgroup_mapping<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
// -----// IR Dump After OptimizeTensorInsertExtractSlicesPass (iree-codegen-optimize-tensor-insert-extract-slices) //----- //
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32() attributes {translation_info = #iree_codegen.translation_info<pipeline = LLVMGPUTileAndFuse workgroup_size = [32, 8, 1] subgroup_size = 32, {gpu_pipeline_options = #iree_gpu.pipeline_options<prefetch_shared_memory = false, no_reduce_shared_memory_bank_conflicts = true, use_igemm_convolution = false>}>} {
%cst = arith.constant 0.000000e+00 : f32
%c256 = arith.constant 256 : index
%c32 = arith.constant 32 : index
%c2 = arith.constant 2 : index
%c258 = arith.constant 258 : index
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = scf.forall (%arg0, %arg1) in (4, 2) shared_outs(%arg2 = %5) -> (tensor<128x256xf32>) {
%7 = affine.apply affine_map<(d0) -> (d0 * 128)>(%arg1)
%8 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg0)
%extracted_slice = tensor.extract_slice %arg2[%8, %7] [32, 128] [1, 1] : tensor<128x256xf32> to tensor<32x128xf32>
%9 = bufferization.alloc_tensor() {memory_space = #gpu.address_space<workgroup>} : tensor<32x2xf32>
%10 = bufferization.alloc_tensor() {memory_space = #gpu.address_space<workgroup>} : tensor<2x128xf32>
%11 = scf.forall (%arg3, %arg4) in (32, 8) shared_outs(%arg5 = %extracted_slice) -> (tensor<32x128xf32>) {
%12 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg4)
%extracted_slice_0 = tensor.extract_slice %arg5[%arg3, %12] [1, 16] [1, 1] : tensor<32x128xf32> to tensor<1x16xf32>
%13 = vector.transfer_read %arg5[%arg3, %12], %cst {in_bounds = [true, true]} : tensor<32x128xf32>, vector<1x16xf32>
%14 = scf.for %arg6 = %c0 to %c258 step %c2 iter_args(%arg7 = %13) -> (vector<1x16xf32>) {
%16:2 = iree_gpu.barrier_region ins(%9, %10 : tensor<32x2xf32>, tensor<2x128xf32>) {
^bb0(%arg8: tensor<32x2xf32>, %arg9: tensor<2x128xf32>):
%20 = affine.apply affine_map<(d0, d1) -> (d0 * 8 + d1)>(%arg3, %arg4)
%21 = scf.for %arg10 = %20 to %c32 step %c256 iter_args(%arg11 = %arg8) -> (tensor<32x2xf32>) {
%26 = affine.apply affine_map<(d0)[s0] -> (d0 * 32 + s0)>(%arg0)[%arg10]
%extracted_slice_3 = tensor.extract_slice %3[%26, %arg6] [1, 2] [1, 1] : tensor<128x258xf32> to tensor<1x2xf32>
%extracted_slice_4 = tensor.extract_slice %arg11[%arg10, 0] [1, 2] [1, 1] : tensor<32x2xf32> to tensor<1x2xf32>
%27 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_3 : tensor<1x2xf32>) outs(%extracted_slice_4 : tensor<1x2xf32>) -> tensor<1x2xf32>
%inserted_slice_5 = tensor.insert_slice %27 into %arg11[%arg10, 0] [1, 2] [1, 1] : tensor<1x2xf32> into tensor<32x2xf32>
scf.yield %inserted_slice_5 : tensor<32x2xf32>
} {unroll_loop}
%22:2 = affine.delinearize_index %20 into (2, 128) : index, index
%23 = affine.apply affine_map<()[s0, s1] -> (s0 + s1)>()[%22#0, %arg6]
%24 = affine.apply affine_map<(d0)[s0] -> (d0 * 128 + s0)>(%arg1)[%22#1]
%extracted_slice_1 = tensor.extract_slice %4[%23, %24] [1, 1] [1, 1] : tensor<258x256xf32> to tensor<1x1xf32>
%extracted_slice_2 = tensor.extract_slice %arg9[%22#0, %22#1] [1, 1] [1, 1] : tensor<2x128xf32> to tensor<1x1xf32>
%25 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_1 : tensor<1x1xf32>) outs(%extracted_slice_2 : tensor<1x1xf32>) -> tensor<1x1xf32>
%inserted_slice = tensor.insert_slice %25 into %arg9[%22#0, %22#1] [1, 1] [1, 1] : tensor<1x1xf32> into tensor<2x128xf32>
iree_gpu.yield %21, %inserted_slice : tensor<32x2xf32>, tensor<2x128xf32>
} : tensor<32x2xf32>, tensor<2x128xf32>
%17 = vector.transfer_read %16#0[%arg3, %c0], %cst {in_bounds = [true, true]} : tensor<32x2xf32>, vector<1x2xf32>
%18 = vector.transfer_read %16#1[%c0, %12], %cst {in_bounds = [true, true]} : tensor<2x128xf32>, vector<2x16xf32>
%19 = vector.contract {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"], kind = #vector.kind<add>} %17, %18, %arg7 : vector<1x2xf32>, vector<2x16xf32> into vector<1x16xf32>
scf.yield %19 : vector<1x16xf32>
}
%15 = vector.transfer_write %14, %extracted_slice_0[%c0, %c0] {in_bounds = [true, true]} : vector<1x16xf32>, tensor<1x16xf32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %15 into %arg5[%arg3, %12] [1, 16] [1, 1] : tensor<1x16xf32> into tensor<32x128xf32>
}
} {mapping = [#gpu.thread<linear_dim_1>, #gpu.thread<linear_dim_0>]}
scf.forall.in_parallel {
tensor.parallel_insert_slice %11 into %arg2[%8, %7] [32, 128] [1, 1] : tensor<32x128xf32> into tensor<128x256xf32>
}
} {mapping = [#iree_codegen.workgroup_mapping<y>, #iree_codegen.workgroup_mapping<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32() attributes {translation_info = #iree_codegen.translation_info<pipeline = LLVMGPUTileAndFuse workgroup_size = [32, 8, 1] subgroup_size = 32, {gpu_pipeline_options = #iree_gpu.pipeline_options<prefetch_shared_memory = false, no_reduce_shared_memory_bank_conflicts = true, use_igemm_convolution = false>}>} {
%cst = arith.constant 0.000000e+00 : f32
%c256 = arith.constant 256 : index
%c32 = arith.constant 32 : index
%c2 = arith.constant 2 : index
%c258 = arith.constant 258 : index
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = scf.forall (%arg0, %arg1) in (4, 2) shared_outs(%arg2 = %5) -> (tensor<128x256xf32>) {
%7 = affine.apply affine_map<(d0) -> (d0 * 128)>(%arg1)
%8 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg0)
%extracted_slice = tensor.extract_slice %arg2[%8, %7] [32, 128] [1, 1] : tensor<128x256xf32> to tensor<32x128xf32>
%9 = bufferization.alloc_tensor() {memory_space = #gpu.address_space<workgroup>} : tensor<32x2xf32>
%10 = bufferization.alloc_tensor() {memory_space = #gpu.address_space<workgroup>} : tensor<2x128xf32>
%11 = scf.forall (%arg3, %arg4) in (32, 8) shared_outs(%arg5 = %extracted_slice) -> (tensor<32x128xf32>) {
%12 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg4)
%extracted_slice_0 = tensor.extract_slice %arg5[%arg3, %12] [1, 16] [1, 1] : tensor<32x128xf32> to tensor<1x16xf32>
%13 = vector.transfer_read %arg5[%arg3, %12], %cst {in_bounds = [true, true]} : tensor<32x128xf32>, vector<1x16xf32>
%14 = scf.for %arg6 = %c0 to %c258 step %c2 iter_args(%arg7 = %13) -> (vector<1x16xf32>) {
%16:2 = iree_gpu.barrier_region ins(%9, %10 : tensor<32x2xf32>, tensor<2x128xf32>) {
^bb0(%arg8: tensor<32x2xf32>, %arg9: tensor<2x128xf32>):
%20 = affine.apply affine_map<(d0, d1) -> (d0 * 8 + d1)>(%arg3, %arg4)
%21 = scf.for %arg10 = %20 to %c32 step %c256 iter_args(%arg11 = %arg8) -> (tensor<32x2xf32>) {
%26 = affine.apply affine_map<(d0)[s0] -> (d0 * 32 + s0)>(%arg0)[%arg10]
%extracted_slice_3 = tensor.extract_slice %3[%26, %arg6] [1, 2] [1, 1] : tensor<128x258xf32> to tensor<1x2xf32>
%extracted_slice_4 = tensor.extract_slice %arg11[%arg10, 0] [1, 2] [1, 1] : tensor<32x2xf32> to tensor<1x2xf32>
%27 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_3 : tensor<1x2xf32>) outs(%extracted_slice_4 : tensor<1x2xf32>) -> tensor<1x2xf32>
%inserted_slice_5 = tensor.insert_slice %27 into %arg11[%arg10, 0] [1, 2] [1, 1] : tensor<1x2xf32> into tensor<32x2xf32>
scf.yield %inserted_slice_5 : tensor<32x2xf32>
} {unroll_loop}
%22:2 = affine.delinearize_index %20 into (2, 128) : index, index
%23 = affine.apply affine_map<()[s0, s1] -> (s0 + s1)>()[%22#0, %arg6]
%24 = affine.apply affine_map<(d0)[s0] -> (d0 * 128 + s0)>(%arg1)[%22#1]
%extracted_slice_1 = tensor.extract_slice %4[%23, %24] [1, 1] [1, 1] : tensor<258x256xf32> to tensor<1x1xf32>
%extracted_slice_2 = tensor.extract_slice %arg9[%22#0, %22#1] [1, 1] [1, 1] : tensor<2x128xf32> to tensor<1x1xf32>
%25 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_1 : tensor<1x1xf32>) outs(%extracted_slice_2 : tensor<1x1xf32>) -> tensor<1x1xf32>
%inserted_slice = tensor.insert_slice %25 into %arg9[%22#0, %22#1] [1, 1] [1, 1] : tensor<1x1xf32> into tensor<2x128xf32>
iree_gpu.yield %21, %inserted_slice : tensor<32x2xf32>, tensor<2x128xf32>
} : tensor<32x2xf32>, tensor<2x128xf32>
%17 = vector.transfer_read %16#0[%arg3, %c0], %cst {in_bounds = [true, true]} : tensor<32x2xf32>, vector<1x2xf32>
%18 = vector.transfer_read %16#1[%c0, %12], %cst {in_bounds = [true, true]} : tensor<2x128xf32>, vector<2x16xf32>
%19 = vector.contract {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"], kind = #vector.kind<add>} %17, %18, %arg7 : vector<1x2xf32>, vector<2x16xf32> into vector<1x16xf32>
scf.yield %19 : vector<1x16xf32>
}
%15 = vector.transfer_write %14, %extracted_slice_0[%c0, %c0] {in_bounds = [true, true]} : vector<1x16xf32>, tensor<1x16xf32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %15 into %arg5[%arg3, %12] [1, 16] [1, 1] : tensor<1x16xf32> into tensor<32x128xf32>
}
} {mapping = [#gpu.thread<linear_dim_1>, #gpu.thread<linear_dim_0>]}
scf.forall.in_parallel {
tensor.parallel_insert_slice %11 into %arg2[%8, %7] [32, 128] [1, 1] : tensor<32x128xf32> into tensor<128x256xf32>
}
} {mapping = [#iree_codegen.workgroup_mapping<y>, #iree_codegen.workgroup_mapping<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
// -----// IR Dump After CSE (cse) //----- //
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32() attributes {translation_info = #iree_codegen.translation_info<pipeline = LLVMGPUTileAndFuse workgroup_size = [32, 8, 1] subgroup_size = 32, {gpu_pipeline_options = #iree_gpu.pipeline_options<prefetch_shared_memory = false, no_reduce_shared_memory_bank_conflicts = true, use_igemm_convolution = false>}>} {
%cst = arith.constant 0.000000e+00 : f32
%c256 = arith.constant 256 : index
%c32 = arith.constant 32 : index
%c2 = arith.constant 2 : index
%c258 = arith.constant 258 : index
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = scf.forall (%arg0, %arg1) in (4, 2) shared_outs(%arg2 = %5) -> (tensor<128x256xf32>) {
%7 = affine.apply affine_map<(d0) -> (d0 * 128)>(%arg1)
%8 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg0)
%extracted_slice = tensor.extract_slice %arg2[%8, %7] [32, 128] [1, 1] : tensor<128x256xf32> to tensor<32x128xf32>
%9 = bufferization.alloc_tensor() {memory_space = #gpu.address_space<workgroup>} : tensor<32x2xf32>
%10 = bufferization.alloc_tensor() {memory_space = #gpu.address_space<workgroup>} : tensor<2x128xf32>
%11 = scf.forall (%arg3, %arg4) in (32, 8) shared_outs(%arg5 = %extracted_slice) -> (tensor<32x128xf32>) {
%12 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg4)
%extracted_slice_0 = tensor.extract_slice %arg5[%arg3, %12] [1, 16] [1, 1] : tensor<32x128xf32> to tensor<1x16xf32>
%13 = vector.transfer_read %arg5[%arg3, %12], %cst {in_bounds = [true, true]} : tensor<32x128xf32>, vector<1x16xf32>
%14 = scf.for %arg6 = %c0 to %c258 step %c2 iter_args(%arg7 = %13) -> (vector<1x16xf32>) {
%16:2 = iree_gpu.barrier_region ins(%9, %10 : tensor<32x2xf32>, tensor<2x128xf32>) {
^bb0(%arg8: tensor<32x2xf32>, %arg9: tensor<2x128xf32>):
%20 = affine.apply affine_map<(d0, d1) -> (d0 * 8 + d1)>(%arg3, %arg4)
%21 = scf.for %arg10 = %20 to %c32 step %c256 iter_args(%arg11 = %arg8) -> (tensor<32x2xf32>) {
%26 = affine.apply affine_map<(d0)[s0] -> (d0 * 32 + s0)>(%arg0)[%arg10]
%extracted_slice_3 = tensor.extract_slice %3[%26, %arg6] [1, 2] [1, 1] : tensor<128x258xf32> to tensor<1x2xf32>
%extracted_slice_4 = tensor.extract_slice %arg11[%arg10, 0] [1, 2] [1, 1] : tensor<32x2xf32> to tensor<1x2xf32>
%27 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_3 : tensor<1x2xf32>) outs(%extracted_slice_4 : tensor<1x2xf32>) -> tensor<1x2xf32>
%inserted_slice_5 = tensor.insert_slice %27 into %arg11[%arg10, 0] [1, 2] [1, 1] : tensor<1x2xf32> into tensor<32x2xf32>
scf.yield %inserted_slice_5 : tensor<32x2xf32>
} {unroll_loop}
%22:2 = affine.delinearize_index %20 into (2, 128) : index, index
%23 = affine.apply affine_map<()[s0, s1] -> (s0 + s1)>()[%22#0, %arg6]
%24 = affine.apply affine_map<(d0)[s0] -> (d0 * 128 + s0)>(%arg1)[%22#1]
%extracted_slice_1 = tensor.extract_slice %4[%23, %24] [1, 1] [1, 1] : tensor<258x256xf32> to tensor<1x1xf32>
%extracted_slice_2 = tensor.extract_slice %arg9[%22#0, %22#1] [1, 1] [1, 1] : tensor<2x128xf32> to tensor<1x1xf32>
%25 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_1 : tensor<1x1xf32>) outs(%extracted_slice_2 : tensor<1x1xf32>) -> tensor<1x1xf32>
%inserted_slice = tensor.insert_slice %25 into %arg9[%22#0, %22#1] [1, 1] [1, 1] : tensor<1x1xf32> into tensor<2x128xf32>
iree_gpu.yield %21, %inserted_slice : tensor<32x2xf32>, tensor<2x128xf32>
} : tensor<32x2xf32>, tensor<2x128xf32>
%17 = vector.transfer_read %16#0[%arg3, %c0], %cst {in_bounds = [true, true]} : tensor<32x2xf32>, vector<1x2xf32>
%18 = vector.transfer_read %16#1[%c0, %12], %cst {in_bounds = [true, true]} : tensor<2x128xf32>, vector<2x16xf32>
%19 = vector.contract {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"], kind = #vector.kind<add>} %17, %18, %arg7 : vector<1x2xf32>, vector<2x16xf32> into vector<1x16xf32>
scf.yield %19 : vector<1x16xf32>
}
%15 = vector.transfer_write %14, %extracted_slice_0[%c0, %c0] {in_bounds = [true, true]} : vector<1x16xf32>, tensor<1x16xf32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %15 into %arg5[%arg3, %12] [1, 16] [1, 1] : tensor<1x16xf32> into tensor<32x128xf32>
}
} {mapping = [#gpu.thread<linear_dim_1>, #gpu.thread<linear_dim_0>]}
scf.forall.in_parallel {
tensor.parallel_insert_slice %11 into %arg2[%8, %7] [32, 128] [1, 1] : tensor<32x128xf32> into tensor<128x256xf32>
}
} {mapping = [#iree_codegen.workgroup_mapping<y>, #iree_codegen.workgroup_mapping<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
// -----// IR Dump After CleanupBufferAllocViewPass (iree-codegen-cleanup-buffer-alloc-view) //----- //
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32() attributes {translation_info = #iree_codegen.translation_info<pipeline = LLVMGPUTileAndFuse workgroup_size = [32, 8, 1] subgroup_size = 32, {gpu_pipeline_options = #iree_gpu.pipeline_options<prefetch_shared_memory = false, no_reduce_shared_memory_bank_conflicts = true, use_igemm_convolution = false>}>} {
%cst = arith.constant 0.000000e+00 : f32
%c256 = arith.constant 256 : index
%c32 = arith.constant 32 : index
%c2 = arith.constant 2 : index
%c258 = arith.constant 258 : index
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = scf.forall (%arg0, %arg1) in (4, 2) shared_outs(%arg2 = %5) -> (tensor<128x256xf32>) {
%7 = affine.apply affine_map<(d0) -> (d0 * 128)>(%arg1)
%8 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg0)
%extracted_slice = tensor.extract_slice %arg2[%8, %7] [32, 128] [1, 1] : tensor<128x256xf32> to tensor<32x128xf32>
%9 = bufferization.alloc_tensor() {memory_space = #gpu.address_space<workgroup>} : tensor<32x2xf32>
%10 = bufferization.alloc_tensor() {memory_space = #gpu.address_space<workgroup>} : tensor<2x128xf32>
%11 = scf.forall (%arg3, %arg4) in (32, 8) shared_outs(%arg5 = %extracted_slice) -> (tensor<32x128xf32>) {
%12 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg4)
%extracted_slice_0 = tensor.extract_slice %arg5[%arg3, %12] [1, 16] [1, 1] : tensor<32x128xf32> to tensor<1x16xf32>
%13 = vector.transfer_read %arg5[%arg3, %12], %cst {in_bounds = [true, true]} : tensor<32x128xf32>, vector<1x16xf32>
%14 = scf.for %arg6 = %c0 to %c258 step %c2 iter_args(%arg7 = %13) -> (vector<1x16xf32>) {
%16:2 = iree_gpu.barrier_region ins(%9, %10 : tensor<32x2xf32>, tensor<2x128xf32>) {
^bb0(%arg8: tensor<32x2xf32>, %arg9: tensor<2x128xf32>):
%20 = affine.apply affine_map<(d0, d1) -> (d0 * 8 + d1)>(%arg3, %arg4)
%21 = scf.for %arg10 = %20 to %c32 step %c256 iter_args(%arg11 = %arg8) -> (tensor<32x2xf32>) {
%26 = affine.apply affine_map<(d0)[s0] -> (d0 * 32 + s0)>(%arg0)[%arg10]
%extracted_slice_3 = tensor.extract_slice %3[%26, %arg6] [1, 2] [1, 1] : tensor<128x258xf32> to tensor<1x2xf32>
%extracted_slice_4 = tensor.extract_slice %arg11[%arg10, 0] [1, 2] [1, 1] : tensor<32x2xf32> to tensor<1x2xf32>
%27 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_3 : tensor<1x2xf32>) outs(%extracted_slice_4 : tensor<1x2xf32>) -> tensor<1x2xf32>
%inserted_slice_5 = tensor.insert_slice %27 into %arg11[%arg10, 0] [1, 2] [1, 1] : tensor<1x2xf32> into tensor<32x2xf32>
scf.yield %inserted_slice_5 : tensor<32x2xf32>
} {unroll_loop}
%22:2 = affine.delinearize_index %20 into (2, 128) : index, index
%23 = affine.apply affine_map<()[s0, s1] -> (s0 + s1)>()[%22#0, %arg6]
%24 = affine.apply affine_map<(d0)[s0] -> (d0 * 128 + s0)>(%arg1)[%22#1]
%extracted_slice_1 = tensor.extract_slice %4[%23, %24] [1, 1] [1, 1] : tensor<258x256xf32> to tensor<1x1xf32>
%extracted_slice_2 = tensor.extract_slice %arg9[%22#0, %22#1] [1, 1] [1, 1] : tensor<2x128xf32> to tensor<1x1xf32>
%25 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_1 : tensor<1x1xf32>) outs(%extracted_slice_2 : tensor<1x1xf32>) -> tensor<1x1xf32>
%inserted_slice = tensor.insert_slice %25 into %arg9[%22#0, %22#1] [1, 1] [1, 1] : tensor<1x1xf32> into tensor<2x128xf32>
iree_gpu.yield %21, %inserted_slice : tensor<32x2xf32>, tensor<2x128xf32>
} : tensor<32x2xf32>, tensor<2x128xf32>
%17 = vector.transfer_read %16#0[%arg3, %c0], %cst {in_bounds = [true, true]} : tensor<32x2xf32>, vector<1x2xf32>
%18 = vector.transfer_read %16#1[%c0, %12], %cst {in_bounds = [true, true]} : tensor<2x128xf32>, vector<2x16xf32>
%19 = vector.contract {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"], kind = #vector.kind<add>} %17, %18, %arg7 : vector<1x2xf32>, vector<2x16xf32> into vector<1x16xf32>
scf.yield %19 : vector<1x16xf32>
}
%15 = vector.transfer_write %14, %extracted_slice_0[%c0, %c0] {in_bounds = [true, true]} : vector<1x16xf32>, tensor<1x16xf32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %15 into %arg5[%arg3, %12] [1, 16] [1, 1] : tensor<1x16xf32> into tensor<32x128xf32>
}
} {mapping = [#gpu.thread<linear_dim_1>, #gpu.thread<linear_dim_0>]}
scf.forall.in_parallel {
tensor.parallel_insert_slice %11 into %arg2[%8, %7] [32, 128] [1, 1] : tensor<32x128xf32> into tensor<128x256xf32>
}
} {mapping = [#iree_codegen.workgroup_mapping<y>, #iree_codegen.workgroup_mapping<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
// -----// IR Dump After GPUCombineValueBarriersPass (iree-codegen-gpu-combine-value-barriers) //----- //
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32() attributes {translation_info = #iree_codegen.translation_info<pipeline = LLVMGPUTileAndFuse workgroup_size = [32, 8, 1] subgroup_size = 32, {gpu_pipeline_options = #iree_gpu.pipeline_options<prefetch_shared_memory = false, no_reduce_shared_memory_bank_conflicts = true, use_igemm_convolution = false>}>} {
%cst = arith.constant 0.000000e+00 : f32
%c256 = arith.constant 256 : index
%c32 = arith.constant 32 : index
%c2 = arith.constant 2 : index
%c258 = arith.constant 258 : index
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = tensor.empty() : tensor<128x256xf32>
%6 = scf.forall (%arg0, %arg1) in (4, 2) shared_outs(%arg2 = %5) -> (tensor<128x256xf32>) {
%7 = affine.apply affine_map<(d0) -> (d0 * 128)>(%arg1)
%8 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg0)
%extracted_slice = tensor.extract_slice %arg2[%8, %7] [32, 128] [1, 1] : tensor<128x256xf32> to tensor<32x128xf32>
%9 = bufferization.alloc_tensor() {memory_space = #gpu.address_space<workgroup>} : tensor<32x2xf32>
%10 = bufferization.alloc_tensor() {memory_space = #gpu.address_space<workgroup>} : tensor<2x128xf32>
%11 = scf.forall (%arg3, %arg4) in (32, 8) shared_outs(%arg5 = %extracted_slice) -> (tensor<32x128xf32>) {
%12 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg4)
%extracted_slice_0 = tensor.extract_slice %arg5[%arg3, %12] [1, 16] [1, 1] : tensor<32x128xf32> to tensor<1x16xf32>
%13 = vector.transfer_read %arg5[%arg3, %12], %cst {in_bounds = [true, true]} : tensor<32x128xf32>, vector<1x16xf32>
%14 = scf.for %arg6 = %c0 to %c258 step %c2 iter_args(%arg7 = %13) -> (vector<1x16xf32>) {
%16:2 = iree_gpu.barrier_region ins(%9, %10 : tensor<32x2xf32>, tensor<2x128xf32>) {
^bb0(%arg8: tensor<32x2xf32>, %arg9: tensor<2x128xf32>):
%20 = affine.apply affine_map<(d0, d1) -> (d0 * 8 + d1)>(%arg3, %arg4)
%21 = scf.for %arg10 = %20 to %c32 step %c256 iter_args(%arg11 = %arg8) -> (tensor<32x2xf32>) {
%26 = affine.apply affine_map<(d0)[s0] -> (d0 * 32 + s0)>(%arg0)[%arg10]
%extracted_slice_3 = tensor.extract_slice %3[%26, %arg6] [1, 2] [1, 1] : tensor<128x258xf32> to tensor<1x2xf32>
%extracted_slice_4 = tensor.extract_slice %arg11[%arg10, 0] [1, 2] [1, 1] : tensor<32x2xf32> to tensor<1x2xf32>
%27 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_3 : tensor<1x2xf32>) outs(%extracted_slice_4 : tensor<1x2xf32>) -> tensor<1x2xf32>
%inserted_slice_5 = tensor.insert_slice %27 into %arg11[%arg10, 0] [1, 2] [1, 1] : tensor<1x2xf32> into tensor<32x2xf32>
scf.yield %inserted_slice_5 : tensor<32x2xf32>
} {unroll_loop}
%22:2 = affine.delinearize_index %20 into (2, 128) : index, index
%23 = affine.apply affine_map<()[s0, s1] -> (s0 + s1)>()[%22#0, %arg6]
%24 = affine.apply affine_map<(d0)[s0] -> (d0 * 128 + s0)>(%arg1)[%22#1]
%extracted_slice_1 = tensor.extract_slice %4[%23, %24] [1, 1] [1, 1] : tensor<258x256xf32> to tensor<1x1xf32>
%extracted_slice_2 = tensor.extract_slice %arg9[%22#0, %22#1] [1, 1] [1, 1] : tensor<2x128xf32> to tensor<1x1xf32>
%25 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_1 : tensor<1x1xf32>) outs(%extracted_slice_2 : tensor<1x1xf32>) -> tensor<1x1xf32>
%inserted_slice = tensor.insert_slice %25 into %arg9[%22#0, %22#1] [1, 1] [1, 1] : tensor<1x1xf32> into tensor<2x128xf32>
iree_gpu.yield %21, %inserted_slice : tensor<32x2xf32>, tensor<2x128xf32>
} : tensor<32x2xf32>, tensor<2x128xf32>
%17 = vector.transfer_read %16#0[%arg3, %c0], %cst {in_bounds = [true, true]} : tensor<32x2xf32>, vector<1x2xf32>
%18 = vector.transfer_read %16#1[%c0, %12], %cst {in_bounds = [true, true]} : tensor<2x128xf32>, vector<2x16xf32>
%19 = vector.contract {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"], kind = #vector.kind<add>} %17, %18, %arg7 : vector<1x2xf32>, vector<2x16xf32> into vector<1x16xf32>
scf.yield %19 : vector<1x16xf32>
}
%15 = vector.transfer_write %14, %extracted_slice_0[%c0, %c0] {in_bounds = [true, true]} : vector<1x16xf32>, tensor<1x16xf32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %15 into %arg5[%arg3, %12] [1, 16] [1, 1] : tensor<1x16xf32> into tensor<32x128xf32>
}
} {mapping = [#gpu.thread<linear_dim_1>, #gpu.thread<linear_dim_0>]}
scf.forall.in_parallel {
tensor.parallel_insert_slice %11 into %arg2[%8, %7] [32, 128] [1, 1] : tensor<32x128xf32> into tensor<128x256xf32>
}
} {mapping = [#iree_codegen.workgroup_mapping<y>, #iree_codegen.workgroup_mapping<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
// -----// IR Dump After EliminateEmptyTensorsPass (iree-eliminate-empty-tensors) //----- //
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32() attributes {translation_info = #iree_codegen.translation_info<pipeline = LLVMGPUTileAndFuse workgroup_size = [32, 8, 1] subgroup_size = 32, {gpu_pipeline_options = #iree_gpu.pipeline_options<prefetch_shared_memory = false, no_reduce_shared_memory_bank_conflicts = true, use_igemm_convolution = false>}>} {
%cst = arith.constant 0.000000e+00 : f32
%c256 = arith.constant 256 : index
%c32 = arith.constant 32 : index
%c2 = arith.constant 2 : index
%c258 = arith.constant 258 : index
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = flow.dispatch.tensor.load %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : !flow.dispatch.tensor<writeonly:tensor<128x256xf32>> -> tensor<128x256xf32>
%6 = scf.forall (%arg0, %arg1) in (4, 2) shared_outs(%arg2 = %5) -> (tensor<128x256xf32>) {
%7 = affine.apply affine_map<(d0) -> (d0 * 128)>(%arg1)
%8 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg0)
%extracted_slice = tensor.extract_slice %arg2[%8, %7] [32, 128] [1, 1] : tensor<128x256xf32> to tensor<32x128xf32>
%9 = bufferization.alloc_tensor() {memory_space = #gpu.address_space<workgroup>} : tensor<32x2xf32>
%10 = bufferization.alloc_tensor() {memory_space = #gpu.address_space<workgroup>} : tensor<2x128xf32>
%11 = scf.forall (%arg3, %arg4) in (32, 8) shared_outs(%arg5 = %extracted_slice) -> (tensor<32x128xf32>) {
%12 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg4)
%extracted_slice_0 = tensor.extract_slice %arg5[%arg3, %12] [1, 16] [1, 1] : tensor<32x128xf32> to tensor<1x16xf32>
%13 = vector.transfer_read %arg5[%arg3, %12], %cst {in_bounds = [true, true]} : tensor<32x128xf32>, vector<1x16xf32>
%14 = scf.for %arg6 = %c0 to %c258 step %c2 iter_args(%arg7 = %13) -> (vector<1x16xf32>) {
%16:2 = iree_gpu.barrier_region ins(%9, %10 : tensor<32x2xf32>, tensor<2x128xf32>) {
^bb0(%arg8: tensor<32x2xf32>, %arg9: tensor<2x128xf32>):
%20 = affine.apply affine_map<(d0, d1) -> (d0 * 8 + d1)>(%arg3, %arg4)
%21 = scf.for %arg10 = %20 to %c32 step %c256 iter_args(%arg11 = %arg8) -> (tensor<32x2xf32>) {
%26 = affine.apply affine_map<(d0)[s0] -> (d0 * 32 + s0)>(%arg0)[%arg10]
%extracted_slice_3 = tensor.extract_slice %3[%26, %arg6] [1, 2] [1, 1] : tensor<128x258xf32> to tensor<1x2xf32>
%extracted_slice_4 = tensor.extract_slice %arg11[%arg10, 0] [1, 2] [1, 1] : tensor<32x2xf32> to tensor<1x2xf32>
%27 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_3 : tensor<1x2xf32>) outs(%extracted_slice_4 : tensor<1x2xf32>) -> tensor<1x2xf32>
%inserted_slice_5 = tensor.insert_slice %27 into %arg11[%arg10, 0] [1, 2] [1, 1] : tensor<1x2xf32> into tensor<32x2xf32>
scf.yield %inserted_slice_5 : tensor<32x2xf32>
} {unroll_loop}
%22:2 = affine.delinearize_index %20 into (2, 128) : index, index
%23 = affine.apply affine_map<()[s0, s1] -> (s0 + s1)>()[%22#0, %arg6]
%24 = affine.apply affine_map<(d0)[s0] -> (d0 * 128 + s0)>(%arg1)[%22#1]
%extracted_slice_1 = tensor.extract_slice %4[%23, %24] [1, 1] [1, 1] : tensor<258x256xf32> to tensor<1x1xf32>
%extracted_slice_2 = tensor.extract_slice %arg9[%22#0, %22#1] [1, 1] [1, 1] : tensor<2x128xf32> to tensor<1x1xf32>
%25 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_1 : tensor<1x1xf32>) outs(%extracted_slice_2 : tensor<1x1xf32>) -> tensor<1x1xf32>
%inserted_slice = tensor.insert_slice %25 into %arg9[%22#0, %22#1] [1, 1] [1, 1] : tensor<1x1xf32> into tensor<2x128xf32>
iree_gpu.yield %21, %inserted_slice : tensor<32x2xf32>, tensor<2x128xf32>
} : tensor<32x2xf32>, tensor<2x128xf32>
%17 = vector.transfer_read %16#0[%arg3, %c0], %cst {in_bounds = [true, true]} : tensor<32x2xf32>, vector<1x2xf32>
%18 = vector.transfer_read %16#1[%c0, %12], %cst {in_bounds = [true, true]} : tensor<2x128xf32>, vector<2x16xf32>
%19 = vector.contract {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction"], kind = #vector.kind<add>} %17, %18, %arg7 : vector<1x2xf32>, vector<2x16xf32> into vector<1x16xf32>
scf.yield %19 : vector<1x16xf32>
}
%15 = vector.transfer_write %14, %extracted_slice_0[%c0, %c0] {in_bounds = [true, true]} : vector<1x16xf32>, tensor<1x16xf32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %15 into %arg5[%arg3, %12] [1, 16] [1, 1] : tensor<1x16xf32> into tensor<32x128xf32>
}
} {mapping = [#gpu.thread<linear_dim_1>, #gpu.thread<linear_dim_0>]}
scf.forall.in_parallel {
tensor.parallel_insert_slice %11 into %arg2[%8, %7] [32, 128] [1, 1] : tensor<32x128xf32> into tensor<128x256xf32>
}
} {mapping = [#iree_codegen.workgroup_mapping<y>, #iree_codegen.workgroup_mapping<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : tensor<128x256xf32> -> !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
return
}
// -----// IR Dump After EmptyTensorToAllocTensor (empty-tensor-to-alloc-tensor) //----- //
func.func @unaligned_k_dispatch_0_matmul_128x256x258_f32() attributes {translation_info = #iree_codegen.translation_info<pipeline = LLVMGPUTileAndFuse workgroup_size = [32, 8, 1] subgroup_size = 32, {gpu_pipeline_options = #iree_gpu.pipeline_options<prefetch_shared_memory = false, no_reduce_shared_memory_bank_conflicts = true, use_igemm_convolution = false>}>} {
%cst = arith.constant 0.000000e+00 : f32
%c256 = arith.constant 256 : index
%c32 = arith.constant 32 : index
%c2 = arith.constant 2 : index
%c258 = arith.constant 258 : index
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<128x258xf32>>
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<258x256xf32>>
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<128x256xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 258], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x258xf32>> -> tensor<128x258xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [258, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<258x256xf32>> -> tensor<258x256xf32>
%5 = flow.dispatch.tensor.load %2, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : !flow.dispatch.tensor<writeonly:tensor<128x256xf32>> -> tensor<128x256xf32>
%6 = scf.forall (%arg0, %arg1) in (4, 2) shared_outs(%arg2 = %5) -> (tensor<128x256xf32>) {
%7 = affine.apply affine_map<(d0) -> (d0 * 128)>(%arg1)
%8 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg0)
%extracted_slice = tensor.extract_slice %arg2[%8, %7] [32, 128] [1, 1] : tensor<128x256xf32> to tensor<32x128xf32>
%9 = bufferization.alloc_tensor() {memory_space = #gpu.address_space<workgroup>} : tensor<32x2xf32>
%10 = bufferization.alloc_tensor() {memory_space = #gpu.address_space<workgroup>} : tensor<2x128xf32>
%11 = scf.forall (%arg3, %arg4) in (32, 8) shared_outs(%arg5 = %extracted_slice) -> (tensor<32x128xf32>) {
%12 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg4)
%extracted_slice_0 = tensor.extract_slice %arg5[%arg3, %12] [1, 16] [1, 1] : tensor<32x128xf32> to tensor<1x16xf32>
%13 = vector.transfer_read %arg5[%arg3, %12], %cst {in_bounds = [true, true]} : tensor<32x128xf32>, vector<1x16xf32>
%14 = scf.for %arg6 = %c0 to %c258 step %c2 iter_args(%arg7 = %13) -> (vector<1x16xf32>) {
%16:2 = iree_gpu.barrier_region ins(%9, %10 : tensor<32x2xf32>, tensor<2x128xf32>) {
^bb0(%arg8: tensor<32x2xf32>, %arg9: tensor<2x128xf32>):
%20 = affine.apply affine_map<(d0, d1) -> (d0 * 8 + d1)>(%arg3, %arg4)
%21 = scf.for %arg10 = %20 to %c32 step %c256 iter_args(%arg11 = %arg8) -> (tensor<32x2xf32>) {
%26 = affine.apply affine_map<(d0)[s0] -> (d0 * 32 + s0)>(%arg0)[%arg10]
%extracted_slice_3 = tensor.extract_slice %3[%26, %arg6] [1, 2] [1, 1] : tensor<128x258xf32> to tensor<1x2xf32>
%extracted_slice_4 = tensor.extract_slice %arg11[%arg10, 0] [1, 2] [1, 1] : tensor<32x2xf32> to tensor<1x2xf32>
%27 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_3 : tensor<1x2xf32>) outs(%extracted_slice_4 : tensor<1x2xf32>) -> tensor<1x2xf32>
%inserted_slice_5 = tensor.insert_slice %27 into %arg11[%arg10, 0] [1, 2] [1, 1] : tensor<1x2xf32> into tensor<32x2xf32>
scf.yield %inserted_slice_5 : tensor<32x2xf32>
} {unroll_loop}
%22:2 = affine.delinearize_index %20 into (2, 128) : index, index
%23 = affine.apply affine_map<()[s0, s1] -> (s0 + s1)>()[%22#0, %arg6]
%24 = affine.apply affine_map<(d0)[s0] -> (d0 * 128 + s0)>(%arg1)[%22#1]
%extracted_slice_1 = tensor.extract_slice %4[%23, %24] [1, 1] [1, 1] : tensor<258x256xf32> to tensor<1x1xf32>
%extracted_slice_2 = tensor.extract_slice %arg9[%22#0, %22#1] [1, 1] [1, 1] : tensor<2x128xf32> to tensor<1x1xf32>
%25 = linalg.copy {lowering_config = #iree_gpu.derived_thread_config} ins(%extracted_slice_1 : tensor<1x1xf32>) outs(%extracted_slice_2 : tensor<1x1xf32>) -> tensor<1x1xf32>
%inserted_slice = tensor.insert_slice %25 into %arg9[%22#0, %22#1] [1, 1] [1, 1] : tensor<1x1xf32> into tensor<2x128xf32>
iree_gpu.yield %21, %inserted_slice : tensor<32x2xf32>, tensor<2x128xf32>
} : tensor<3
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment