Created November 8, 2024 15:07
This file has been truncated.
// -----// IR Dump After AutoInputConversionPipelinePass (iree-auto-input-conversion) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu_features = "+avx512f", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "x86_64-none-elf"}> | |
#translation = #iree_codegen.translation_info<CPUDefault> | |
module { | |
func.func @quantized_matmul_neither_zp_0_dynamic(%arg0: tensor<256x256xi8>, %arg1: tensor<256x256xi8>, %arg2: i32, %arg3: i32, %arg4: tensor<256x256xi32>) -> tensor<256x256xi32> attributes {hal.executable.target = #executable_target_embedded_elf_x86_64_, translation_info = #translation} { | |
%0 = linalg.quantized_matmul ins(%arg0, %arg1, %arg2, %arg3 : tensor<256x256xi8>, tensor<256x256xi8>, i32, i32) outs(%arg4 : tensor<256x256xi32>) -> tensor<256x256xi32> | |
return %0 : tensor<256x256xi32> | |
} | |
} | |
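The trace starts from a single linalg.quantized_matmul on 256x256 i8 operands: %arg2 and %arg3 are the LHS and RHS i32 zero points, and %arg4 is the i32 accumulator it writes into. Its contraction can be written as

$$C_{ij} \mathrel{+}= \sum_{k=0}^{K-1} (A_{ik} - z_A)\,(B_{kj} - z_B), \qquad K = 256.$$

The dumps below replay this same function after each pass; the op itself is untouched until the global-opt pipeline rewrites it (see the LinalgQuantizedMatmulToMatmulPass dump further down).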
// -----// IR Dump After IREEImportPublicPass (iree-import-public) //----- // | |
module { | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: tensor<256x256xi8>, %arg1: tensor<256x256xi8>, %arg2: i32, %arg3: i32, %arg4: tensor<256x256xi32>) -> tensor<256x256xi32> { | |
%0 = linalg.quantized_matmul ins(%arg0, %arg1, %arg2, %arg3 : tensor<256x256xi8>, tensor<256x256xi8>, i32, i32) outs(%arg4 : tensor<256x256xi32>) -> tensor<256x256xi32> | |
util.return %0 : tensor<256x256xi32> | |
} | |
} | |
// -----// IR Dump After ImportMLProgramPass (iree-import-ml-program) //----- // | |
module { | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: tensor<256x256xi8>, %arg1: tensor<256x256xi8>, %arg2: i32, %arg3: i32, %arg4: tensor<256x256xi32>) -> tensor<256x256xi32> { | |
%0 = linalg.quantized_matmul ins(%arg0, %arg1, %arg2, %arg3 : tensor<256x256xi8>, tensor<256x256xi8>, i32, i32) outs(%arg4 : tensor<256x256xi32>) -> tensor<256x256xi32> | |
util.return %0 : tensor<256x256xi32> | |
} | |
} | |
// -----// IR Dump After SanitizeModuleNamesPass (iree-sanitize-module-names) //----- // | |
module { | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: tensor<256x256xi8>, %arg1: tensor<256x256xi8>, %arg2: i32, %arg3: i32, %arg4: tensor<256x256xi32>) -> tensor<256x256xi32> { | |
%0 = linalg.quantized_matmul ins(%arg0, %arg1, %arg2, %arg3 : tensor<256x256xi8>, tensor<256x256xi8>, i32, i32) outs(%arg4 : tensor<256x256xi32>) -> tensor<256x256xi32> | |
util.return %0 : tensor<256x256xi32> | |
} | |
} | |
// -----// IR Dump After ConvertMeshToFlowPass (iree-convert-mesh-to-flow) //----- // | |
module { | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: tensor<256x256xi8>, %arg1: tensor<256x256xi8>, %arg2: i32, %arg3: i32, %arg4: tensor<256x256xi32>) -> tensor<256x256xi32> { | |
%0 = linalg.quantized_matmul ins(%arg0, %arg1, %arg2, %arg3 : tensor<256x256xi8>, tensor<256x256xi8>, i32, i32) outs(%arg4 : tensor<256x256xi32>) -> tensor<256x256xi32> | |
util.return %0 : tensor<256x256xi32> | |
} | |
} | |
// -----// IR Dump After DemoteF64ToF32Pass (iree-input-conversion-demote-f64-to-f32) //----- // | |
module { | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: tensor<256x256xi8>, %arg1: tensor<256x256xi8>, %arg2: i32, %arg3: i32, %arg4: tensor<256x256xi32>) -> tensor<256x256xi32> { | |
%0 = linalg.quantized_matmul ins(%arg0, %arg1, %arg2, %arg3 : tensor<256x256xi8>, tensor<256x256xi8>, i32, i32) outs(%arg4 : tensor<256x256xi32>) -> tensor<256x256xi32> | |
util.return %0 : tensor<256x256xi32> | |
} | |
} | |
// -----// IR Dump After mlir::iree_compiler::IREE::ABI::ConvertStreamableOpsPass (iree-abi-convert-streamable-ops) //----- // | |
module { | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: tensor<256x256xi8>, %arg1: tensor<256x256xi8>, %arg2: i32, %arg3: i32, %arg4: tensor<256x256xi32>) -> tensor<256x256xi32> { | |
%0 = linalg.quantized_matmul ins(%arg0, %arg1, %arg2, %arg3 : tensor<256x256xi8>, tensor<256x256xi8>, i32, i32) outs(%arg4 : tensor<256x256xi32>) -> tensor<256x256xi32> | |
util.return %0 : tensor<256x256xi32> | |
} | |
} | |
// -----// IR Dump After mlir::iree_compiler::IREE::ABI::WrapEntryPointsPass (iree-abi-wrap-entry-points) //----- // | |
module { | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = util.call @_quantized_matmul_neither_zp_0_dynamic(%0, %1, %arg2, %arg3, %2) : (tensor<256x256xi8>, tensor<256x256xi8>, i32, i32, tensor<256x256xi32>) -> tensor<256x256xi32> | |
%4 = hal.tensor.export %3 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
util.func private @_quantized_matmul_neither_zp_0_dynamic(%arg0: tensor<256x256xi8>, %arg1: tensor<256x256xi8>, %arg2: i32, %arg3: i32, %arg4: tensor<256x256xi32>) -> tensor<256x256xi32> { | |
%0 = linalg.quantized_matmul ins(%arg0, %arg1, %arg2, %arg3 : tensor<256x256xi8>, tensor<256x256xi8>, i32, i32) outs(%arg4 : tensor<256x256xi32>) -> tensor<256x256xi32> | |
util.return %0 : tensor<256x256xi32> | |
} | |
} | |
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
util.func private @_quantized_matmul_neither_zp_0_dynamic(%arg0: tensor<256x256xi8>, %arg1: tensor<256x256xi8>, %arg2: i32, %arg3: i32, %arg4: tensor<256x256xi32>) -> tensor<256x256xi32> { | |
%0 = linalg.quantized_matmul ins(%arg0, %arg1, %arg2, %arg3 : tensor<256x256xi8>, tensor<256x256xi8>, i32, i32) outs(%arg4 : tensor<256x256xi32>) -> tensor<256x256xi32> | |
util.return %0 : tensor<256x256xi32> | |
} | |
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = util.call @_quantized_matmul_neither_zp_0_dynamic(%0, %1, %arg2, %arg3, %2) : (tensor<256x256xi8>, tensor<256x256xi8>, i32, i32, tensor<256x256xi32>) -> tensor<256x256xi32> | |
%4 = hal.tensor.export %3 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = linalg.quantized_matmul ins(%0, %1, %arg2, %arg3 : tensor<256x256xi8>, tensor<256x256xi8>, i32, i32) outs(%2 : tensor<256x256xi32>) -> tensor<256x256xi32> | |
%4 = hal.tensor.export %3 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
// -----// IR Dump After Inliner (inline) //----- // | |
module { | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = linalg.quantized_matmul ins(%0, %1, %arg2, %arg3 : tensor<256x256xi8>, tensor<256x256xi8>, i32, i32) outs(%2 : tensor<256x256xi32>) -> tensor<256x256xi32> | |
%4 = hal.tensor.export %3 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = linalg.quantized_matmul ins(%0, %1, %arg2, %arg3 : tensor<256x256xi8>, tensor<256x256xi8>, i32, i32) outs(%2 : tensor<256x256xi32>) -> tensor<256x256xi32> | |
%4 = hal.tensor.export %3 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = linalg.quantized_matmul ins(%0, %1, %arg2, %arg3 : tensor<256x256xi8>, tensor<256x256xi8>, i32, i32) outs(%2 : tensor<256x256xi32>) -> tensor<256x256xi32> | |
%4 = hal.tensor.export %3 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
// -----// IR Dump After SymbolDCE (symbol-dce) //----- // | |
module { | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = linalg.quantized_matmul ins(%0, %1, %arg2, %arg3 : tensor<256x256xi8>, tensor<256x256xi8>, i32, i32) outs(%2 : tensor<256x256xi32>) -> tensor<256x256xi32> | |
%4 = hal.tensor.export %3 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After AssignLegacyTargetDevicesPass (iree-hal-assign-legacy-target-devices) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-amx-fp8,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,-movrs,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,+evex512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-sha512,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,+gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,-nf,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-amx-transpose,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device | |
module attributes {hal.device.targets = [#device_target_local]} { | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = linalg.quantized_matmul ins(%0, %1, %arg2, %arg3 : tensor<256x256xi8>, tensor<256x256xi8>, i32, i32) outs(%2 : tensor<256x256xi32>) -> tensor<256x256xi32> | |
%4 = hal.tensor.export %3 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After MaterializeTargetDevicesPass (iree-hal-materialize-target-devices) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-amx-fp8,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,-movrs,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,+evex512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-sha512,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,+gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,-nf,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-amx-transpose,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = linalg.quantized_matmul ins(%0, %1, %arg2, %arg3 : tensor<256x256xi8>, tensor<256x256xi8>, i32, i32) outs(%2 : tensor<256x256xi32>) -> tensor<256x256xi32> | |
%4 = hal.tensor.export %3 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After ResolveDevicePromisesPass (iree-hal-resolve-device-promises) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-amx-fp8,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,-movrs,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,+evex512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-sha512,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,+gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,-nf,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-amx-transpose,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = linalg.quantized_matmul ins(%0, %1, %arg2, %arg3 : tensor<256x256xi8>, tensor<256x256xi8>, i32, i32) outs(%2 : tensor<256x256xi32>) -> tensor<256x256xi32> | |
%4 = hal.tensor.export %3 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After ResolveDeviceAliasesPass (iree-hal-resolve-device-aliases) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-amx-fp8,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,-movrs,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,+evex512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-sha512,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,+gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,-nf,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-amx-transpose,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = linalg.quantized_matmul ins(%0, %1, %arg2, %arg3 : tensor<256x256xi8>, tensor<256x256xi8>, i32, i32) outs(%2 : tensor<256x256xi32>) -> tensor<256x256xi32> | |
%4 = hal.tensor.export %3 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After VerifyDevicesPass (iree-hal-verify-devices) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-amx-fp8,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,-movrs,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,+evex512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-sha512,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,+gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,-nf,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-amx-transpose,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = linalg.quantized_matmul ins(%0, %1, %arg2, %arg3 : tensor<256x256xi8>, tensor<256x256xi8>, i32, i32) outs(%2 : tensor<256x256xi32>) -> tensor<256x256xi32> | |
%4 = hal.tensor.export %3 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After OptimizeIntArithmetic (iree-util-optimize-int-arithmetic) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = linalg.quantized_matmul ins(%0, %1, %arg2, %arg3 : tensor<256x256xi8>, tensor<256x256xi8>, i32, i32) outs(%2 : tensor<256x256xi32>) -> tensor<256x256xi32> | |
%4 = hal.tensor.export %3 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
// -----// IR Dump After LinalgQuantizedConvToConvPass (iree-global-opt-quantized-conv-to-conv) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = linalg.quantized_matmul ins(%0, %1, %arg2, %arg3 : tensor<256x256xi8>, tensor<256x256xi8>, i32, i32) outs(%2 : tensor<256x256xi32>) -> tensor<256x256xi32> | |
%4 = hal.tensor.export %3 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
// -----// IR Dump After LinalgQuantizedMatmulToMatmulPass (iree-global-opt-quantized-matmul-to-matmul) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = linalg.matmul ins(%0, %1 : tensor<256x256xi8>, tensor<256x256xi8>) outs(%2 : tensor<256x256xi32>) -> tensor<256x256xi32> | |
%4 = tensor.empty() : tensor<256x256xi32> | |
%5 = tensor.empty() : tensor<256xi32> | |
%6 = linalg.fill ins(%c0_i32 : i32) outs(%5 : tensor<256xi32>) -> tensor<256xi32> | |
%7 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%6 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%8 = tensor.empty() : tensor<256xi32> | |
%9 = linalg.fill ins(%c0_i32 : i32) outs(%8 : tensor<256xi32>) -> tensor<256xi32> | |
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%9 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%11 = arith.muli %arg2, %arg3 : i32 | |
%12 = arith.muli %11, %c256_i32 : i32 | |
%13 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%3, %7, %arg3, %10, %arg2, %12 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%4 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_0: i32, %in_1: i32, %in_2: i32, %in_3: i32, %in_4: i32, %out: i32): | |
%15 = arith.muli %in_0, %in_1 : i32 | |
%16 = arith.subi %in, %15 : i32 | |
%17 = arith.muli %in_2, %in_3 : i32 | |
%18 = arith.subi %16, %17 : i32 | |
%19 = arith.addi %18, %in_4 : i32 | |
linalg.yield %19 : i32 | |
} -> tensor<256x256xi32> | |
%14 = hal.tensor.export %13 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
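The rewrite above is the standard zero-point expansion: since $\sum_k (A_{ik}-z_A)(B_{kj}-z_B) = \sum_k A_{ik}B_{kj} - z_B\sum_k A_{ik} - z_A\sum_k B_{kj} + K\,z_A z_B$, the pass emits a plain linalg.matmul (%3), row sums of the LHS (%7), column sums of the RHS (%10), and the constant K*z_A*z_B (%12), then combines them in the final linalg.generic (%13). A minimal NumPy sketch of that identity, with illustrative names (a, b, acc, zp_a, zp_b) that stand in for the SSA values above:

import numpy as np

# Editorial sketch, not part of the original dump; checks the algebra behind the rewrite.
rng = np.random.default_rng(0)
K = 256
a = rng.integers(-128, 128, size=(K, K), dtype=np.int8)       # LHS, like %0
b = rng.integers(-128, 128, size=(K, K), dtype=np.int8)       # RHS, like %1
acc = rng.integers(-1000, 1000, size=(K, K), dtype=np.int32)  # accumulator, like %2
zp_a, zp_b = np.int32(3), np.int32(-7)                        # zero points, like %arg2 / %arg3

# Reference semantics of linalg.quantized_matmul (accumulates into acc).
ref = acc + (a.astype(np.int32) - zp_a) @ (b.astype(np.int32) - zp_b)

# Rewritten form from the dump above.
matmul = acc + a.astype(np.int32) @ b.astype(np.int32)    # %3
row_sums_a = a.astype(np.int32).sum(axis=1)                # %7: sum_k a[i, k]
col_sums_b = b.astype(np.int32).sum(axis=0)                # %10: sum_k b[k, j]
correction = np.int32(K) * zp_a * zp_b                     # %12
out = matmul - row_sums_a[:, None] * zp_b - col_sums_b[None, :] * zp_a + correction  # %13

assert np.array_equal(ref, out)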
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = linalg.matmul ins(%0, %1 : tensor<256x256xi8>, tensor<256x256xi8>) outs(%2 : tensor<256x256xi32>) -> tensor<256x256xi32> | |
%4 = tensor.empty() : tensor<256x256xi32> | |
%5 = tensor.empty() : tensor<256xi32> | |
%6 = linalg.fill ins(%c0_i32 : i32) outs(%5 : tensor<256xi32>) -> tensor<256xi32> | |
%7 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%6 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%8 = tensor.empty() : tensor<256xi32> | |
%9 = linalg.fill ins(%c0_i32 : i32) outs(%8 : tensor<256xi32>) -> tensor<256xi32> | |
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%9 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%11 = arith.muli %arg2, %arg3 : i32 | |
%12 = arith.muli %11, %c256_i32 : i32 | |
%13 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%3, %7, %arg3, %10, %arg2, %12 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%4 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_0: i32, %in_1: i32, %in_2: i32, %in_3: i32, %in_4: i32, %out: i32): | |
%15 = arith.muli %in_0, %in_1 : i32 | |
%16 = arith.subi %in, %15 : i32 | |
%17 = arith.muli %in_2, %in_3 : i32 | |
%18 = arith.subi %16, %17 : i32 | |
%19 = arith.addi %18, %in_4 : i32 | |
linalg.yield %19 : i32 | |
} -> tensor<256x256xi32> | |
%14 = hal.tensor.export %13 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
// -----// IR Dump After RemoveZeroExtentTensorsPass (iree-global-opt-remove-zero-extent-tensors) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = linalg.matmul ins(%0, %1 : tensor<256x256xi8>, tensor<256x256xi8>) outs(%2 : tensor<256x256xi32>) -> tensor<256x256xi32> | |
%4 = tensor.empty() : tensor<256x256xi32> | |
%5 = tensor.empty() : tensor<256xi32> | |
%6 = linalg.fill ins(%c0_i32 : i32) outs(%5 : tensor<256xi32>) -> tensor<256xi32> | |
%7 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%6 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%8 = tensor.empty() : tensor<256xi32> | |
%9 = linalg.fill ins(%c0_i32 : i32) outs(%8 : tensor<256xi32>) -> tensor<256xi32> | |
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%9 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%11 = arith.muli %arg2, %arg3 : i32 | |
%12 = arith.muli %11, %c256_i32 : i32 | |
%13 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%3, %7, %arg3, %10, %arg2, %12 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%4 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_0: i32, %in_1: i32, %in_2: i32, %in_3: i32, %in_4: i32, %out: i32): | |
%15 = arith.muli %in_0, %in_1 : i32 | |
%16 = arith.subi %in, %15 : i32 | |
%17 = arith.muli %in_2, %in_3 : i32 | |
%18 = arith.subi %16, %17 : i32 | |
%19 = arith.addi %18, %in_4 : i32 | |
linalg.yield %19 : i32 | |
} -> tensor<256x256xi32> | |
%14 = hal.tensor.export %13 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
// -----// IR Dump After DetachElementwiseFromNamedOpsPass (iree-global-opt-detach-elementwise-from-named-ops) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = linalg.matmul ins(%0, %1 : tensor<256x256xi8>, tensor<256x256xi8>) outs(%2 : tensor<256x256xi32>) -> tensor<256x256xi32> | |
%4 = tensor.empty() : tensor<256x256xi32> | |
%5 = tensor.empty() : tensor<256xi32> | |
%6 = linalg.fill ins(%c0_i32 : i32) outs(%5 : tensor<256xi32>) -> tensor<256xi32> | |
%7 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%6 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%8 = tensor.empty() : tensor<256xi32> | |
%9 = linalg.fill ins(%c0_i32 : i32) outs(%8 : tensor<256xi32>) -> tensor<256xi32> | |
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%9 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%11 = arith.muli %arg2, %arg3 : i32 | |
%12 = arith.muli %11, %c256_i32 : i32 | |
%13 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%3, %7, %arg3, %10, %arg2, %12 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%4 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_0: i32, %in_1: i32, %in_2: i32, %in_3: i32, %in_4: i32, %out: i32): | |
%15 = arith.muli %in_0, %in_1 : i32 | |
%16 = arith.subi %in, %15 : i32 | |
%17 = arith.muli %in_2, %in_3 : i32 | |
%18 = arith.subi %16, %17 : i32 | |
%19 = arith.addi %18, %in_4 : i32 | |
linalg.yield %19 : i32 | |
} -> tensor<256x256xi32> | |
%14 = hal.tensor.export %13 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
// -----// IR Dump After LinalgNamedOpConversionPass (linalg-named-op-conversion) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = linalg.matmul ins(%0, %1 : tensor<256x256xi8>, tensor<256x256xi8>) outs(%2 : tensor<256x256xi32>) -> tensor<256x256xi32> | |
%4 = tensor.empty() : tensor<256x256xi32> | |
%5 = tensor.empty() : tensor<256xi32> | |
%6 = linalg.fill ins(%c0_i32 : i32) outs(%5 : tensor<256xi32>) -> tensor<256xi32> | |
%7 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%6 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%8 = tensor.empty() : tensor<256xi32> | |
%9 = linalg.fill ins(%c0_i32 : i32) outs(%8 : tensor<256xi32>) -> tensor<256xi32> | |
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%9 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%11 = arith.muli %arg2, %arg3 : i32 | |
%12 = arith.muli %11, %c256_i32 : i32 | |
%13 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%3, %7, %arg3, %10, %arg2, %12 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%4 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_0: i32, %in_1: i32, %in_2: i32, %in_3: i32, %in_4: i32, %out: i32): | |
%15 = arith.muli %in_0, %in_1 : i32 | |
%16 = arith.subi %in, %15 : i32 | |
%17 = arith.muli %in_2, %in_3 : i32 | |
%18 = arith.subi %16, %17 : i32 | |
%19 = arith.addi %18, %in_4 : i32 | |
linalg.yield %19 : i32 | |
} -> tensor<256x256xi32> | |
%14 = hal.tensor.export %13 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
// -----// IR Dump After EraseUnusedLinalgOperandsPass (iree-global-opt-erase-unused-linalg-operands) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-amx-fp8,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,-movrs,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,+evex512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-sha512,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,+gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,-nf,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-amx-transpose,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1) -> (d0, d1)> | |
#map1 = affine_map<(d0, d1) -> (d0)> | |
#map2 = affine_map<(d0, d1) -> (d1, d0)> | |
#map3 = affine_map<(d0, d1) -> ()> | |
#map4 = affine_map<(d0, d1) -> (d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = linalg.matmul ins(%0, %1 : tensor<256x256xi8>, tensor<256x256xi8>) outs(%2 : tensor<256x256xi32>) -> tensor<256x256xi32> | |
%4 = tensor.empty() : tensor<256x256xi32> | |
%5 = tensor.empty() : tensor<256xi32> | |
%6 = linalg.fill ins(%c0_i32 : i32) outs(%5 : tensor<256xi32>) -> tensor<256xi32> | |
%7 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%6 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%8 = tensor.empty() : tensor<256xi32> | |
%9 = linalg.fill ins(%c0_i32 : i32) outs(%8 : tensor<256xi32>) -> tensor<256xi32> | |
%10 = linalg.generic {indexing_maps = [#map2, #map1], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%9 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%11 = arith.muli %arg2, %arg3 : i32 | |
%12 = arith.muli %11, %c256_i32 : i32 | |
%13 = linalg.generic {indexing_maps = [#map, #map1, #map3, #map4, #map3, #map3, #map], iterator_types = ["parallel", "parallel"]} ins(%3, %7, %arg3, %10, %arg2, %12 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%4 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_0: i32, %in_1: i32, %in_2: i32, %in_3: i32, %in_4: i32, %out: i32): | |
%15 = arith.muli %in_0, %in_1 : i32 | |
%16 = arith.subi %in, %15 : i32 | |
%17 = arith.muli %in_2, %in_3 : i32 | |
%18 = arith.subi %16, %17 : i32 | |
%19 = arith.addi %18, %in_4 : i32 | |
linalg.yield %19 : i32 | |
} -> tensor<256x256xi32> | |
%14 = hal.tensor.export %13 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After ExpandTensorShapesPass (iree-global-opt-expand-tensor-shapes) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-amx-fp8,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,-movrs,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,+evex512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-sha512,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,+gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,-nf,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-amx-transpose,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1) -> (d0, d1)> | |
#map1 = affine_map<(d0, d1) -> (d0)> | |
#map2 = affine_map<(d0, d1) -> (d1, d0)> | |
#map3 = affine_map<(d0, d1) -> ()> | |
#map4 = affine_map<(d0, d1) -> (d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = linalg.matmul ins(%0, %1 : tensor<256x256xi8>, tensor<256x256xi8>) outs(%2 : tensor<256x256xi32>) -> tensor<256x256xi32> | |
%4 = tensor.empty() : tensor<256x256xi32> | |
%5 = tensor.empty() : tensor<256xi32> | |
%6 = linalg.fill ins(%c0_i32 : i32) outs(%5 : tensor<256xi32>) -> tensor<256xi32> | |
%7 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%6 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%8 = tensor.empty() : tensor<256xi32> | |
%9 = linalg.fill ins(%c0_i32 : i32) outs(%8 : tensor<256xi32>) -> tensor<256xi32> | |
%10 = linalg.generic {indexing_maps = [#map2, #map1], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%9 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%11 = arith.muli %arg2, %arg3 : i32 | |
%12 = arith.muli %11, %c256_i32 : i32 | |
%13 = linalg.generic {indexing_maps = [#map, #map1, #map3, #map4, #map3, #map3, #map], iterator_types = ["parallel", "parallel"]} ins(%3, %7, %arg3, %10, %arg2, %12 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%4 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_0: i32, %in_1: i32, %in_2: i32, %in_3: i32, %in_4: i32, %out: i32): | |
%15 = arith.muli %in_0, %in_1 : i32 | |
%16 = arith.subi %in, %15 : i32 | |
%17 = arith.muli %in_2, %in_3 : i32 | |
%18 = arith.subi %16, %17 : i32 | |
%19 = arith.addi %18, %in_4 : i32 | |
linalg.yield %19 : i32 | |
} -> tensor<256x256xi32> | |
%14 = hal.tensor.export %13 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After ConvertElementwiseToLinalgPass (convert-elementwise-to-linalg) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = linalg.matmul ins(%0, %1 : tensor<256x256xi8>, tensor<256x256xi8>) outs(%2 : tensor<256x256xi32>) -> tensor<256x256xi32> | |
%4 = tensor.empty() : tensor<256x256xi32> | |
%5 = tensor.empty() : tensor<256xi32> | |
%6 = linalg.fill ins(%c0_i32 : i32) outs(%5 : tensor<256xi32>) -> tensor<256xi32> | |
%7 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%6 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%8 = tensor.empty() : tensor<256xi32> | |
%9 = linalg.fill ins(%c0_i32 : i32) outs(%8 : tensor<256xi32>) -> tensor<256xi32> | |
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%9 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%11 = arith.muli %arg2, %arg3 : i32 | |
%12 = arith.muli %11, %c256_i32 : i32 | |
%13 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%3, %7, %arg3, %10, %arg2, %12 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%4 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_0: i32, %in_1: i32, %in_2: i32, %in_3: i32, %in_4: i32, %out: i32): | |
%15 = arith.muli %in_0, %in_1 : i32 | |
%16 = arith.subi %in, %15 : i32 | |
%17 = arith.muli %in_2, %in_3 : i32 | |
%18 = arith.subi %16, %17 : i32 | |
%19 = arith.addi %18, %in_4 : i32 | |
linalg.yield %19 : i32 | |
} -> tensor<256x256xi32> | |
%14 = hal.tensor.export %13 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
// -----// IR Dump After RaiseSpecialOpsPass (iree-global-opt-raise-special-ops) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = linalg.matmul ins(%0, %1 : tensor<256x256xi8>, tensor<256x256xi8>) outs(%2 : tensor<256x256xi32>) -> tensor<256x256xi32> | |
%4 = tensor.empty() : tensor<256x256xi32> | |
%5 = tensor.empty() : tensor<256xi32> | |
%6 = linalg.fill ins(%c0_i32 : i32) outs(%5 : tensor<256xi32>) -> tensor<256xi32> | |
%7 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%6 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%8 = tensor.empty() : tensor<256xi32> | |
%9 = linalg.fill ins(%c0_i32 : i32) outs(%8 : tensor<256xi32>) -> tensor<256xi32> | |
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%9 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%11 = arith.muli %arg2, %arg3 : i32 | |
%12 = arith.muli %11, %c256_i32 : i32 | |
%13 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%3, %7, %arg3, %10, %arg2, %12 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%4 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_0: i32, %in_1: i32, %in_2: i32, %in_3: i32, %in_4: i32, %out: i32): | |
%15 = arith.muli %in_0, %in_1 : i32 | |
%16 = arith.subi %in, %15 : i32 | |
%17 = arith.muli %in_2, %in_3 : i32 | |
%18 = arith.subi %16, %17 : i32 | |
%19 = arith.addi %18, %in_4 : i32 | |
linalg.yield %19 : i32 | |
} -> tensor<256x256xi32> | |
%14 = hal.tensor.export %13 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
// -----// IR Dump After DecomposeConcatPass (iree-global-opt-decompose-concat) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = linalg.matmul ins(%0, %1 : tensor<256x256xi8>, tensor<256x256xi8>) outs(%2 : tensor<256x256xi32>) -> tensor<256x256xi32> | |
%4 = tensor.empty() : tensor<256x256xi32> | |
%5 = tensor.empty() : tensor<256xi32> | |
%6 = linalg.fill ins(%c0_i32 : i32) outs(%5 : tensor<256xi32>) -> tensor<256xi32> | |
%7 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%6 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%8 = tensor.empty() : tensor<256xi32> | |
%9 = linalg.fill ins(%c0_i32 : i32) outs(%8 : tensor<256xi32>) -> tensor<256xi32> | |
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%9 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%11 = arith.muli %arg2, %arg3 : i32 | |
%12 = arith.muli %11, %c256_i32 : i32 | |
%13 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%3, %7, %arg3, %10, %arg2, %12 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%4 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_0: i32, %in_1: i32, %in_2: i32, %in_3: i32, %in_4: i32, %out: i32): | |
%15 = arith.muli %in_0, %in_1 : i32 | |
%16 = arith.subi %in, %15 : i32 | |
%17 = arith.muli %in_2, %in_3 : i32 | |
%18 = arith.subi %16, %17 : i32 | |
%19 = arith.addi %18, %in_4 : i32 | |
linalg.yield %19 : i32 | |
} -> tensor<256x256xi32> | |
%14 = hal.tensor.export %13 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
// -----// IR Dump After GeneralizeLinalgNamedOpsPass (iree-global-opt-generalize-linalg-named-ops) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = linalg.matmul ins(%0, %1 : tensor<256x256xi8>, tensor<256x256xi8>) outs(%2 : tensor<256x256xi32>) -> tensor<256x256xi32> | |
%4 = tensor.empty() : tensor<256x256xi32> | |
%5 = tensor.empty() : tensor<256xi32> | |
%6 = linalg.fill ins(%c0_i32 : i32) outs(%5 : tensor<256xi32>) -> tensor<256xi32> | |
%7 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%6 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%8 = tensor.empty() : tensor<256xi32> | |
%9 = linalg.fill ins(%c0_i32 : i32) outs(%8 : tensor<256xi32>) -> tensor<256xi32> | |
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%9 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%11 = arith.muli %arg2, %arg3 : i32 | |
%12 = arith.muli %11, %c256_i32 : i32 | |
%13 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%3, %7, %arg3, %10, %arg2, %12 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%4 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_0: i32, %in_1: i32, %in_2: i32, %in_3: i32, %in_4: i32, %out: i32): | |
%15 = arith.muli %in_0, %in_1 : i32 | |
%16 = arith.subi %in, %15 : i32 | |
%17 = arith.muli %in_2, %in_3 : i32 | |
%18 = arith.subi %16, %17 : i32 | |
%19 = arith.addi %18, %in_4 : i32 | |
linalg.yield %19 : i32 | |
} -> tensor<256x256xi32> | |
%14 = hal.tensor.export %13 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
// -----// IR Dump After FoldUnitExtentDimsPass (iree-dispatch-creation-fold-unit-extent-dims) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-amx-fp8,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,-movrs,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,+evex512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-sha512,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,+gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,-nf,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-amx-transpose,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1) -> (d0, d1)> | |
#map1 = affine_map<(d0, d1) -> (d0)> | |
#map2 = affine_map<(d0, d1) -> (d1, d0)> | |
#map3 = affine_map<(d0, d1) -> ()> | |
#map4 = affine_map<(d0, d1) -> (d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = linalg.matmul ins(%0, %1 : tensor<256x256xi8>, tensor<256x256xi8>) outs(%2 : tensor<256x256xi32>) -> tensor<256x256xi32> | |
%4 = tensor.empty() : tensor<256x256xi32> | |
%5 = tensor.empty() : tensor<256xi32> | |
%6 = linalg.fill ins(%c0_i32 : i32) outs(%5 : tensor<256xi32>) -> tensor<256xi32> | |
%7 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%6 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%8 = tensor.empty() : tensor<256xi32> | |
%9 = linalg.fill ins(%c0_i32 : i32) outs(%8 : tensor<256xi32>) -> tensor<256xi32> | |
%10 = linalg.generic {indexing_maps = [#map2, #map1], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%9 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%11 = arith.muli %arg2, %arg3 : i32 | |
%12 = arith.muli %11, %c256_i32 : i32 | |
%13 = linalg.generic {indexing_maps = [#map, #map1, #map3, #map4, #map3, #map3, #map], iterator_types = ["parallel", "parallel"]} ins(%3, %7, %arg3, %10, %arg2, %12 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%4 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_0: i32, %in_1: i32, %in_2: i32, %in_3: i32, %in_4: i32, %out: i32): | |
%15 = arith.muli %in_0, %in_1 : i32 | |
%16 = arith.subi %in, %15 : i32 | |
%17 = arith.muli %in_2, %in_3 : i32 | |
%18 = arith.subi %16, %17 : i32 | |
%19 = arith.addi %18, %in_4 : i32 | |
linalg.yield %19 : i32 | |
} -> tensor<256x256xi32> | |
%14 = hal.tensor.export %13 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After DemoteContractionInputsToBF16Pass (iree-global-opt-demote-contraction-inputs-to-bf16) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = linalg.matmul ins(%0, %1 : tensor<256x256xi8>, tensor<256x256xi8>) outs(%2 : tensor<256x256xi32>) -> tensor<256x256xi32> | |
%4 = tensor.empty() : tensor<256x256xi32> | |
%5 = tensor.empty() : tensor<256xi32> | |
%6 = linalg.fill ins(%c0_i32 : i32) outs(%5 : tensor<256xi32>) -> tensor<256xi32> | |
%7 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%6 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%8 = tensor.empty() : tensor<256xi32> | |
%9 = linalg.fill ins(%c0_i32 : i32) outs(%8 : tensor<256xi32>) -> tensor<256xi32> | |
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%9 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%11 = arith.muli %arg2, %arg3 : i32 | |
%12 = arith.muli %11, %c256_i32 : i32 | |
%13 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%3, %7, %arg3, %10, %arg2, %12 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%4 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_0: i32, %in_1: i32, %in_2: i32, %in_3: i32, %in_4: i32, %out: i32): | |
%15 = arith.muli %in_0, %in_1 : i32 | |
%16 = arith.subi %in, %15 : i32 | |
%17 = arith.muli %in_2, %in_3 : i32 | |
%18 = arith.subi %16, %17 : i32 | |
%19 = arith.addi %18, %in_4 : i32 | |
linalg.yield %19 : i32 | |
} -> tensor<256x256xi32> | |
%14 = hal.tensor.export %13 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = linalg.matmul ins(%0, %1 : tensor<256x256xi8>, tensor<256x256xi8>) outs(%2 : tensor<256x256xi32>) -> tensor<256x256xi32> | |
%4 = tensor.empty() : tensor<256x256xi32> | |
%5 = tensor.empty() : tensor<256xi32> | |
%6 = linalg.fill ins(%c0_i32 : i32) outs(%5 : tensor<256xi32>) -> tensor<256xi32> | |
%7 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%6 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%8 = tensor.empty() : tensor<256xi32> | |
%9 = linalg.fill ins(%c0_i32 : i32) outs(%8 : tensor<256xi32>) -> tensor<256xi32> | |
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%9 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%11 = arith.muli %arg2, %arg3 : i32 | |
%12 = arith.muli %11, %c256_i32 : i32 | |
%13 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%3, %7, %arg3, %10, %arg2, %12 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%4 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_0: i32, %in_1: i32, %in_2: i32, %in_3: i32, %in_4: i32, %out: i32): | |
%15 = arith.muli %in_0, %in_1 : i32 | |
%16 = arith.subi %in, %15 : i32 | |
%17 = arith.muli %in_2, %in_3 : i32 | |
%18 = arith.subi %16, %17 : i32 | |
%19 = arith.addi %18, %in_4 : i32 | |
linalg.yield %19 : i32 | |
} -> tensor<256x256xi32> | |
%14 = hal.tensor.export %13 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = linalg.matmul ins(%0, %1 : tensor<256x256xi8>, tensor<256x256xi8>) outs(%2 : tensor<256x256xi32>) -> tensor<256x256xi32> | |
%4 = tensor.empty() : tensor<256x256xi32> | |
%5 = tensor.empty() : tensor<256xi32> | |
%6 = linalg.fill ins(%c0_i32 : i32) outs(%5 : tensor<256xi32>) -> tensor<256xi32> | |
%7 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%6 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%13 = arith.extsi %in : i8 to i32 | |
%14 = arith.addi %13, %out : i32 | |
linalg.yield %14 : i32 | |
} -> tensor<256xi32> | |
%8 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%6 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%13 = arith.extsi %in : i8 to i32 | |
%14 = arith.addi %13, %out : i32 | |
linalg.yield %14 : i32 | |
} -> tensor<256xi32> | |
%9 = arith.muli %arg2, %arg3 : i32 | |
%10 = arith.muli %9, %c256_i32 : i32 | |
%11 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%3, %7, %arg3, %8, %arg2, %10 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%4 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_0: i32, %in_1: i32, %in_2: i32, %in_3: i32, %in_4: i32, %out: i32): | |
%13 = arith.muli %in_0, %in_1 : i32 | |
%14 = arith.subi %in, %13 : i32 | |
%15 = arith.muli %in_2, %in_3 : i32 | |
%16 = arith.subi %14, %15 : i32 | |
%17 = arith.addi %16, %in_4 : i32 | |
linalg.yield %17 : i32 | |
} -> tensor<256x256xi32> | |
%12 = hal.tensor.export %11 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %12 : !hal.buffer_view | |
} | |
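// Note: this CSE round folded the duplicated zero-initialized tensor<256xi32> (the former
// %8 empty / %9 fill pair): both the row-sum and column-sum reductions now read the same
// filled init %6. The passes in this stretch otherwise leave the function unchanged.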
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = linalg.matmul ins(%0, %1 : tensor<256x256xi8>, tensor<256x256xi8>) outs(%2 : tensor<256x256xi32>) -> tensor<256x256xi32> | |
%4 = tensor.empty() : tensor<256x256xi32> | |
%5 = tensor.empty() : tensor<256xi32> | |
%6 = linalg.fill ins(%c0_i32 : i32) outs(%5 : tensor<256xi32>) -> tensor<256xi32> | |
%7 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%6 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%13 = arith.extsi %in : i8 to i32 | |
%14 = arith.addi %13, %out : i32 | |
linalg.yield %14 : i32 | |
} -> tensor<256xi32> | |
%8 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%6 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%13 = arith.extsi %in : i8 to i32 | |
%14 = arith.addi %13, %out : i32 | |
linalg.yield %14 : i32 | |
} -> tensor<256xi32> | |
%9 = arith.muli %arg2, %arg3 : i32 | |
%10 = arith.muli %9, %c256_i32 : i32 | |
%11 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%3, %7, %arg3, %8, %arg2, %10 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%4 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_0: i32, %in_1: i32, %in_2: i32, %in_3: i32, %in_4: i32, %out: i32): | |
%13 = arith.muli %in_0, %in_1 : i32 | |
%14 = arith.subi %in, %13 : i32 | |
%15 = arith.muli %in_2, %in_3 : i32 | |
%16 = arith.subi %14, %15 : i32 | |
%17 = arith.addi %16, %in_4 : i32 | |
linalg.yield %17 : i32 | |
} -> tensor<256x256xi32> | |
%12 = hal.tensor.export %11 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %12 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = linalg.matmul ins(%0, %1 : tensor<256x256xi8>, tensor<256x256xi8>) outs(%2 : tensor<256x256xi32>) -> tensor<256x256xi32> | |
%4 = tensor.empty() : tensor<256x256xi32> | |
%5 = tensor.empty() : tensor<256xi32> | |
%6 = linalg.fill ins(%c0_i32 : i32) outs(%5 : tensor<256xi32>) -> tensor<256xi32> | |
%7 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%6 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%13 = arith.extsi %in : i8 to i32 | |
%14 = arith.addi %13, %out : i32 | |
linalg.yield %14 : i32 | |
} -> tensor<256xi32> | |
%8 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%6 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%13 = arith.extsi %in : i8 to i32 | |
%14 = arith.addi %13, %out : i32 | |
linalg.yield %14 : i32 | |
} -> tensor<256xi32> | |
%9 = arith.muli %arg2, %arg3 : i32 | |
%10 = arith.muli %9, %c256_i32 : i32 | |
%11 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%3, %7, %arg3, %8, %arg2, %10 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%4 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_0: i32, %in_1: i32, %in_2: i32, %in_3: i32, %in_4: i32, %out: i32): | |
%13 = arith.muli %in_0, %in_1 : i32 | |
%14 = arith.subi %in, %13 : i32 | |
%15 = arith.muli %in_2, %in_3 : i32 | |
%16 = arith.subi %14, %15 : i32 | |
%17 = arith.addi %16, %in_4 : i32 | |
linalg.yield %17 : i32 | |
} -> tensor<256x256xi32> | |
%12 = hal.tensor.export %11 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %12 : !hal.buffer_view | |
} | |
// -----// IR Dump After SetEncodingPass (iree-dispatch-creation-set-encoding) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = iree_encoding.set_encoding %0 : tensor<256x256xi8> -> tensor<256x256xi8, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [i8, i8, i32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], round_dims_to = array<i64: 32, 32, 32>>> | |
%4 = iree_encoding.set_encoding %1 : tensor<256x256xi8> -> tensor<256x256xi8, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [i8, i8, i32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], round_dims_to = array<i64: 32, 32, 32>>> | |
%5 = iree_encoding.set_encoding %2 : tensor<256x256xi32> -> tensor<256x256xi32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [i8, i8, i32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], round_dims_to = array<i64: 32, 32, 32>>> | |
%6 = linalg.matmul ins(%3, %4 : tensor<256x256xi8, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [i8, i8, i32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], round_dims_to = array<i64: 32, 32, 32>>>, tensor<256x256xi8, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [i8, i8, i32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], round_dims_to = array<i64: 32, 32, 32>>>) outs(%5 : tensor<256x256xi32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [i8, i8, i32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<256x256xi32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [i8, i8, i32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], round_dims_to = array<i64: 32, 32, 32>>> | |
%7 = iree_encoding.unset_encoding %6 : tensor<256x256xi32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [i8, i8, i32], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>], round_dims_to = array<i64: 32, 32, 32>>> -> tensor<256x256xi32> | |
%8 = tensor.empty() : tensor<256x256xi32> | |
%9 = tensor.empty() : tensor<256xi32> | |
%10 = linalg.fill ins(%c0_i32 : i32) outs(%9 : tensor<256xi32>) -> tensor<256xi32> | |
%11 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%10 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%17 = arith.extsi %in : i8 to i32 | |
%18 = arith.addi %17, %out : i32 | |
linalg.yield %18 : i32 | |
} -> tensor<256xi32> | |
%12 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%10 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%17 = arith.extsi %in : i8 to i32 | |
%18 = arith.addi %17, %out : i32 | |
linalg.yield %18 : i32 | |
} -> tensor<256xi32> | |
%13 = arith.muli %arg2, %arg3 : i32 | |
%14 = arith.muli %13, %c256_i32 : i32 | |
%15 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%7, %11, %arg3, %12, %arg2, %14 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%8 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_0: i32, %in_1: i32, %in_2: i32, %in_3: i32, %in_4: i32, %out: i32): | |
%17 = arith.muli %in_0, %in_1 : i32 | |
%18 = arith.subi %in, %17 : i32 | |
%19 = arith.muli %in_2, %in_3 : i32 | |
%20 = arith.subi %18, %19 : i32 | |
%21 = arith.addi %20, %in_4 : i32 | |
linalg.yield %21 : i32 | |
} -> tensor<256x256xi32> | |
%16 = hal.tensor.export %15 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %16 : !hal.buffer_view | |
} | |
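// Note: SetEncoding wraps each matmul operand in iree_encoding.set_encoding, tagging it
// with its operand_index, the (i8, i8, i32) element types, the matmul indexing maps and
// round_dims_to = 32, then returns to a plain row-major tensor via unset_encoding after
// the matmul. The concrete packed layout is deliberately left open at this point and is
// only chosen once the target is known (see the materialization dump below); the two
// row/column-sum reductions and the final fixup generic keep using the unencoded inputs.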
// -----// IR Dump After CPUMaterializeHostEncodingPass (iree-codegen-cpu-materialize-host-encoding) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-amx-fp8,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,-movrs,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,+evex512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-sha512,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,+gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,-nf,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-amx-transpose,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1) -> (d0, d1)> | |
#map1 = affine_map<(d0, d1) -> (d0)> | |
#map2 = affine_map<(d0, d1) -> (d1, d0)> | |
#map3 = affine_map<(d0, d1) -> ()> | |
#map4 = affine_map<(d0, d1) -> (d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c0_i8 = arith.constant 0 : i8 | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 padding_value(%c0_i8 : i8) outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%4 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack_0 = tensor.pack %1 padding_value(%c0_i8 : i8) outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %4 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%5 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack_1 = tensor.pack %2 padding_value(%c0_i32 : i32) outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %5 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
%6 = linalg.mmt4d ins(%pack, %pack_0 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%pack_1 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
%7 = tensor.empty() : tensor<256x256xi32> | |
%unpack = tensor.unpack %6 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %7 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%8 = tensor.empty() : tensor<256x256xi32> | |
%9 = tensor.empty() : tensor<256xi32> | |
%10 = linalg.fill ins(%c0_i32 : i32) outs(%9 : tensor<256xi32>) -> tensor<256xi32> | |
%11 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%10 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%17 = arith.extsi %in : i8 to i32 | |
%18 = arith.addi %17, %out : i32 | |
linalg.yield %18 : i32 | |
} -> tensor<256xi32> | |
%12 = linalg.generic {indexing_maps = [#map2, #map1], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%10 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%17 = arith.extsi %in : i8 to i32 | |
%18 = arith.addi %17, %out : i32 | |
linalg.yield %18 : i32 | |
} -> tensor<256xi32> | |
%13 = arith.muli %arg2, %arg3 : i32 | |
%14 = arith.muli %13, %c256_i32 : i32 | |
%15 = linalg.generic {indexing_maps = [#map, #map1, #map3, #map4, #map3, #map3, #map], iterator_types = ["parallel", "parallel"]} ins(%unpack, %11, %arg3, %12, %arg2, %14 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%8 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_2: i32, %in_3: i32, %in_4: i32, %in_5: i32, %in_6: i32, %out: i32): | |
%17 = arith.muli %in_2, %in_3 : i32 | |
%18 = arith.subi %in, %17 : i32 | |
%19 = arith.muli %in_4, %in_5 : i32 | |
%20 = arith.subi %18, %19 : i32 | |
%21 = arith.addi %20, %in_6 : i32 | |
linalg.yield %21 : i32 | |
} -> tensor<256x256xi32> | |
%16 = hal.tensor.export %15 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %16 : !hal.buffer_view | |
} | |
} | |
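// Note: materialization for the znver4 llvm-cpu target rewrites the encoded matmul as
// tensor.pack + linalg.mmt4d + tensor.unpack. Tile arithmetic for the 256x256 shapes:
//   i8 LHS/RHS, inner_tiles = [16, 2]  -> outer dims [256/16, 256/2] = [16, 128],
//     i.e. tensor<16x128x16x2xi8>
//   i32 accumulator, inner_tiles = [16, 16] -> outer dims [16, 16],
//     i.e. tensor<16x16x16x16xi32>
// The 16x16 M0xN0 tile with a K0 = 2 inner reduction is consistent with the avx512vnni
// feature advertised in the target (i8 x i8 -> i32 dot-product style kernels), though the
// exact kernel selection is up to the backend.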
// -----// IR Dump After MaterializeHomogeneousEncodingsPass (iree-global-opt-materialize-homogeneous-encodings) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-amx-fp8,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,-movrs,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,+evex512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-sha512,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,+gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,-nf,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-amx-transpose,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1) -> (d0, d1)> | |
#map1 = affine_map<(d0, d1) -> (d0)> | |
#map2 = affine_map<(d0, d1) -> (d1, d0)> | |
#map3 = affine_map<(d0, d1) -> ()> | |
#map4 = affine_map<(d0, d1) -> (d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c0_i8 = arith.constant 0 : i8 | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 padding_value(%c0_i8 : i8) outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%4 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack_0 = tensor.pack %1 padding_value(%c0_i8 : i8) outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %4 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%5 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack_1 = tensor.pack %2 padding_value(%c0_i32 : i32) outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %5 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
%6 = linalg.mmt4d ins(%pack, %pack_0 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%pack_1 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
%7 = tensor.empty() : tensor<256x256xi32> | |
%unpack = tensor.unpack %6 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %7 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%8 = tensor.empty() : tensor<256x256xi32> | |
%9 = tensor.empty() : tensor<256xi32> | |
%10 = linalg.fill ins(%c0_i32 : i32) outs(%9 : tensor<256xi32>) -> tensor<256xi32> | |
%11 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%10 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%17 = arith.extsi %in : i8 to i32 | |
%18 = arith.addi %17, %out : i32 | |
linalg.yield %18 : i32 | |
} -> tensor<256xi32> | |
%12 = linalg.generic {indexing_maps = [#map2, #map1], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%10 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%17 = arith.extsi %in : i8 to i32 | |
%18 = arith.addi %17, %out : i32 | |
linalg.yield %18 : i32 | |
} -> tensor<256xi32> | |
%13 = arith.muli %arg2, %arg3 : i32 | |
%14 = arith.muli %13, %c256_i32 : i32 | |
%15 = linalg.generic {indexing_maps = [#map, #map1, #map3, #map4, #map3, #map3, #map], iterator_types = ["parallel", "parallel"]} ins(%unpack, %11, %arg3, %12, %arg2, %14 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%8 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_2: i32, %in_3: i32, %in_4: i32, %in_5: i32, %in_6: i32, %out: i32): | |
%17 = arith.muli %in_2, %in_3 : i32 | |
%18 = arith.subi %in, %17 : i32 | |
%19 = arith.muli %in_4, %in_5 : i32 | |
%20 = arith.subi %18, %19 : i32 | |
%21 = arith.addi %20, %in_6 : i32 | |
linalg.yield %21 : i32 | |
} -> tensor<256x256xi32> | |
%16 = hal.tensor.export %15 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %16 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-amx-fp8,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,-movrs,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,+evex512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-sha512,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,+gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,-nf,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-amx-transpose,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1) -> (d0, d1)> | |
#map1 = affine_map<(d0, d1) -> (d0)> | |
#map2 = affine_map<(d0, d1) -> (d1, d0)> | |
#map3 = affine_map<(d0, d1) -> ()> | |
#map4 = affine_map<(d0, d1) -> (d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%4 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack_0 = tensor.pack %1 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %4 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%5 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack_1 = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %5 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
%6 = linalg.mmt4d ins(%pack, %pack_0 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%pack_1 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
%7 = tensor.empty() : tensor<256x256xi32> | |
%unpack = tensor.unpack %6 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %7 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%8 = tensor.empty() : tensor<256x256xi32> | |
%9 = tensor.empty() : tensor<256xi32> | |
%10 = linalg.fill ins(%c0_i32 : i32) outs(%9 : tensor<256xi32>) -> tensor<256xi32> | |
%11 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%10 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%17 = arith.extsi %in : i8 to i32 | |
%18 = arith.addi %17, %out : i32 | |
linalg.yield %18 : i32 | |
} -> tensor<256xi32> | |
%12 = linalg.generic {indexing_maps = [#map2, #map1], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%10 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%17 = arith.extsi %in : i8 to i32 | |
%18 = arith.addi %17, %out : i32 | |
linalg.yield %18 : i32 | |
} -> tensor<256xi32> | |
%13 = arith.muli %arg2, %arg3 : i32 | |
%14 = arith.muli %13, %c256_i32 : i32 | |
%15 = linalg.generic {indexing_maps = [#map, #map1, #map3, #map4, #map3, #map3, #map], iterator_types = ["parallel", "parallel"]} ins(%unpack, %11, %arg3, %12, %arg2, %14 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%8 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_2: i32, %in_3: i32, %in_4: i32, %in_5: i32, %in_6: i32, %out: i32): | |
%17 = arith.muli %in_2, %in_3 : i32 | |
%18 = arith.subi %in, %17 : i32 | |
%19 = arith.muli %in_4, %in_5 : i32 | |
%20 = arith.subi %18, %19 : i32 | |
%21 = arith.addi %20, %in_6 : i32 | |
linalg.yield %21 : i32 | |
} -> tensor<256x256xi32> | |
%16 = hal.tensor.export %15 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %16 : !hal.buffer_view | |
} | |
} | |
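// Note: canonicalization dropped the padding_value operands from the tensor.pack ops.
// Padding is only needed when a tile size does not divide the packed dimension; here
// 256 mod 16 = 0 and 256 mod 2 = 0, so every pack is exact and the pad value is dead.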
// -----// IR Dump After CSE (cse) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-amx-fp8,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,-movrs,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,+evex512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-sha512,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,+gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,-nf,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-amx-transpose,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1) -> (d0, d1)> | |
#map1 = affine_map<(d0, d1) -> (d0)> | |
#map2 = affine_map<(d0, d1) -> (d1, d0)> | |
#map3 = affine_map<(d0, d1) -> ()> | |
#map4 = affine_map<(d0, d1) -> (d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%pack_0 = tensor.pack %1 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%4 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack_1 = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %4 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
%5 = linalg.mmt4d ins(%pack, %pack_0 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%pack_1 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
%6 = tensor.empty() : tensor<256x256xi32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %6 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%7 = tensor.empty() : tensor<256xi32> | |
%8 = linalg.fill ins(%c0_i32 : i32) outs(%7 : tensor<256xi32>) -> tensor<256xi32> | |
%9 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%10 = linalg.generic {indexing_maps = [#map2, #map1], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%11 = arith.muli %arg2, %arg3 : i32 | |
%12 = arith.muli %11, %c256_i32 : i32 | |
%13 = linalg.generic {indexing_maps = [#map, #map1, #map3, #map4, #map3, #map3, #map], iterator_types = ["parallel", "parallel"]} ins(%unpack, %9, %arg3, %10, %arg2, %12 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%6 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_2: i32, %in_3: i32, %in_4: i32, %in_5: i32, %in_6: i32, %out: i32): | |
%15 = arith.muli %in_2, %in_3 : i32 | |
%16 = arith.subi %in, %15 : i32 | |
%17 = arith.muli %in_4, %in_5 : i32 | |
%18 = arith.subi %16, %17 : i32 | |
%19 = arith.addi %18, %in_6 : i32 | |
linalg.yield %19 : i32 | |
} -> tensor<256x256xi32> | |
%14 = hal.tensor.export %13 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
} | |
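// Note: CSE merged the identical tensor.empty destinations: both i8 packs now share %3,
// and the single tensor<256x256xi32> empty %6 serves both as the unpack destination and
// as the init of the final elementwise generic.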
// -----// IR Dump After SimplifyPackUnpackPass (iree-global-opt-simplify-pack-unpack) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-amx-fp8,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,-movrs,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,+evex512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-sha512,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,+gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,-nf,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-amx-transpose,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1) -> (d0, d1)> | |
#map1 = affine_map<(d0, d1) -> (d0)> | |
#map2 = affine_map<(d0, d1) -> (d1, d0)> | |
#map3 = affine_map<(d0, d1) -> ()> | |
#map4 = affine_map<(d0, d1) -> (d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%pack_0 = tensor.pack %1 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%4 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack_1 = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %4 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
%5 = linalg.mmt4d ins(%pack, %pack_0 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%pack_1 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
%6 = tensor.empty() : tensor<256x256xi32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %6 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%7 = tensor.empty() : tensor<256xi32> | |
%8 = linalg.fill ins(%c0_i32 : i32) outs(%7 : tensor<256xi32>) -> tensor<256xi32> | |
%9 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%10 = linalg.generic {indexing_maps = [#map2, #map1], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%11 = arith.muli %arg2, %arg3 : i32 | |
%12 = arith.muli %11, %c256_i32 : i32 | |
%13 = linalg.generic {indexing_maps = [#map, #map1, #map3, #map4, #map3, #map3, #map], iterator_types = ["parallel", "parallel"]} ins(%unpack, %9, %arg3, %10, %arg2, %12 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%6 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_2: i32, %in_3: i32, %in_4: i32, %in_5: i32, %in_6: i32, %out: i32): | |
%15 = arith.muli %in_2, %in_3 : i32 | |
%16 = arith.subi %in, %15 : i32 | |
%17 = arith.muli %in_4, %in_5 : i32 | |
%18 = arith.subi %16, %17 : i32 | |
%19 = arith.addi %18, %in_6 : i32 | |
linalg.yield %19 : i32 | |
} -> tensor<256x256xi32> | |
%14 = hal.tensor.export %13 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
} | |
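// Annotation (not compiler output): from this point on the quantized matmul is in its data-tiled
// form. tensor.pack rewrites the 256x256 i8 operands into 16x128x16x2 tiles (16x2 inner tiles,
// presumably sized for the i8 -> i32 accumulation width of this AVX-512 target) and the i32
// accumulator into 16x16x16x16 tiles, linalg.mmt4d contracts the packed tiles, and tensor.unpack
// restores the 256x256 result. Below is a minimal NumPy sketch of the LHS pack index mapping
// (outer_dims_perm = [0, 1], inner_dims_pos = [0, 1], inner_tiles = [16, 2]); the names are
// illustrative only, not IREE code.

import numpy as np

# packed[i0, k0, i1, k1] == lhs[i0*16 + i1, k0*2 + k1]
rng = np.random.default_rng(0)
lhs = rng.integers(-128, 128, size=(256, 256), dtype=np.int8)

# Split rows into 16x16 and columns into 128x2 tiles, then move the tile dims innermost:
# (256, 256) -> (16, 16, 128, 2) -> (16, 128, 16, 2).
packed_lhs = lhs.reshape(16, 16, 128, 2).transpose(0, 2, 1, 3)

i, k = 37, 201  # arbitrary element to spot-check
assert packed_lhs[i // 16, k // 2, i % 16, k % 2] == lhs[i, k]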
// -----// IR Dump After DataLayoutPropagationPass (iree-global-opt-data-layout-propagation) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%pack_0 = tensor.pack %1 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%4 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack_1 = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %4 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
%5 = linalg.mmt4d ins(%pack, %pack_0 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%pack_1 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
%6 = tensor.empty() : tensor<256x256xi32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %6 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%7 = tensor.empty() : tensor<256xi32> | |
%8 = linalg.fill ins(%c0_i32 : i32) outs(%7 : tensor<256xi32>) -> tensor<256xi32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%11 = arith.muli %arg2, %arg3 : i32 | |
%12 = arith.muli %11, %c256_i32 : i32 | |
%13 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%unpack, %9, %arg3, %10, %arg2, %12 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%6 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_2: i32, %in_3: i32, %in_4: i32, %in_5: i32, %in_6: i32, %out: i32): | |
%15 = arith.muli %in_2, %in_3 : i32 | |
%16 = arith.subi %in, %15 : i32 | |
%17 = arith.muli %in_4, %in_5 : i32 | |
%18 = arith.subi %16, %17 : i32 | |
%19 = arith.addi %18, %in_6 : i32 | |
linalg.yield %19 : i32 | |
} -> tensor<256x256xi32> | |
%14 = hal.tensor.export %13 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
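// Annotation (not compiler output): the reductions and scalar arithmetic around the mmt4d are the
// usual zero-point correction for a quantized matmul with LHS zero point z_a (%arg2) and RHS zero
// point z_b (%arg3):
//
//   C[i, j] = sum_k (A[i, k] - z_a) * (B[k, j] - z_b)
//           = sum_k A[i, k] * B[k, j]        <- mmt4d result after unpack
//             - z_b * sum_k A[i, k]          <- %9 (row sums of A), scaled by %arg3
//             - z_a * sum_k B[k, j]          <- %10 (column sums of B), scaled by %arg2
//             + K * z_a * z_b                <- %12 = %arg2 * %arg3 * 256, with K = 256
//
// The final linalg.generic (%13) applies exactly these three corrections elementwise.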
// -----// IR Dump After GeneralizeLinalgNamedOpsPass (iree-global-opt-generalize-linalg-named-ops) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%pack_0 = tensor.pack %1 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%4 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack_1 = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %4 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
%5 = linalg.mmt4d ins(%pack, %pack_0 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%pack_1 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
%6 = tensor.empty() : tensor<256x256xi32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %6 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%7 = tensor.empty() : tensor<256xi32> | |
%8 = linalg.fill ins(%c0_i32 : i32) outs(%7 : tensor<256xi32>) -> tensor<256xi32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%11 = arith.muli %arg2, %arg3 : i32 | |
%12 = arith.muli %11, %c256_i32 : i32 | |
%13 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%unpack, %9, %arg3, %10, %arg2, %12 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%6 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_2: i32, %in_3: i32, %in_4: i32, %in_5: i32, %in_6: i32, %out: i32): | |
%15 = arith.muli %in_2, %in_3 : i32 | |
%16 = arith.subi %in, %15 : i32 | |
%17 = arith.muli %in_4, %in_5 : i32 | |
%18 = arith.subi %16, %17 : i32 | |
%19 = arith.addi %18, %in_6 : i32 | |
linalg.yield %19 : i32 | |
} -> tensor<256x256xi32> | |
%14 = hal.tensor.export %13 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
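// Annotation (not compiler output): GeneralizeLinalgNamedOpsPass leaves this dump identical to the
// previous one. The pass only generalizes a subset of named ops, and linalg.fill / linalg.mmt4d are
// not among them here, presumably so that later CPU codegen (e.g. the mmt4d lowering) can still
// match them by name; everything else is already expressed as linalg.generic.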
// -----// IR Dump After GlobalLoopInvariantCodeMotionPass (iree-global-opt-loop-invariant-code-motion) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%pack_0 = tensor.pack %1 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%4 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack_1 = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %4 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
%5 = linalg.mmt4d ins(%pack, %pack_0 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%pack_1 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
%6 = tensor.empty() : tensor<256x256xi32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %6 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%7 = tensor.empty() : tensor<256xi32> | |
%8 = linalg.fill ins(%c0_i32 : i32) outs(%7 : tensor<256xi32>) -> tensor<256xi32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%11 = arith.muli %arg2, %arg3 : i32 | |
%12 = arith.muli %11, %c256_i32 : i32 | |
%13 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%unpack, %9, %arg3, %10, %arg2, %12 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%6 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_2: i32, %in_3: i32, %in_4: i32, %in_5: i32, %in_6: i32, %out: i32): | |
%15 = arith.muli %in_2, %in_3 : i32 | |
%16 = arith.subi %in, %15 : i32 | |
%17 = arith.muli %in_4, %in_5 : i32 | |
%18 = arith.subi %16, %17 : i32 | |
%19 = arith.addi %18, %in_6 : i32 | |
linalg.yield %19 : i32 | |
} -> tensor<256x256xi32> | |
%14 = hal.tensor.export %13 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
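// Annotation (not compiler output): an illustrative NumPy check (not IREE code) that the packed
// mmt4d plus the zero-point corrections above reproduce linalg.quantized_matmul. The initial
// accumulator (%input4) is taken to be zero for brevity, and all names are hypothetical.

import numpy as np

rng = np.random.default_rng(0)
A = rng.integers(-128, 128, size=(256, 256), dtype=np.int8).astype(np.int32)
B = rng.integers(-128, 128, size=(256, 256), dtype=np.int8).astype(np.int32)
z_a, z_b, K = 3, -7, 256

# Reference semantics of linalg.quantized_matmul: subtract the zero points, then accumulate.
ref = (A - z_a) @ (B - z_b)

# Packed layouts as in the IR: LHS and RHS as 16x128x16x2, accumulator as 16x16x16x16.
packed_a = A.reshape(16, 16, 128, 2).transpose(0, 2, 1, 3)   # [i0, k0, i1, k1]
packed_b = B.reshape(128, 2, 16, 16).transpose(2, 0, 3, 1)   # [j0, k0, j1, k1]

# linalg.mmt4d: acc[i0, j0, i1, j1] = sum_{k0, k1} lhs[i0, k0, i1, k1] * rhs[j0, k0, j1, k1]
acc = np.einsum("ikab,jkcb->ijac", packed_a, packed_b)
AB = acc.transpose(0, 2, 1, 3).reshape(256, 256)             # tensor.unpack

row_sum_a = A.sum(axis=1)                                    # %9
col_sum_b = B.sum(axis=0)                                    # %10
fixed = AB - z_b * row_sum_a[:, None] - z_a * col_sum_b[None, :] + K * z_a * z_b
assert np.array_equal(fixed, ref)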
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%pack_0 = tensor.pack %1 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%4 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack_1 = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %4 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
%5 = linalg.mmt4d ins(%pack, %pack_0 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%pack_1 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
%6 = tensor.empty() : tensor<256x256xi32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %6 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%7 = tensor.empty() : tensor<256xi32> | |
%8 = linalg.fill ins(%c0_i32 : i32) outs(%7 : tensor<256xi32>) -> tensor<256xi32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%11 = arith.muli %arg2, %arg3 : i32 | |
%12 = arith.muli %11, %c256_i32 : i32 | |
%13 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%unpack, %9, %arg3, %10, %arg2, %12 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%6 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_2: i32, %in_3: i32, %in_4: i32, %in_5: i32, %in_6: i32, %out: i32): | |
%15 = arith.muli %in_2, %in_3 : i32 | |
%16 = arith.subi %in, %15 : i32 | |
%17 = arith.muli %in_4, %in_5 : i32 | |
%18 = arith.subi %16, %17 : i32 | |
%19 = arith.addi %18, %in_6 : i32 | |
linalg.yield %19 : i32 | |
} -> tensor<256x256xi32> | |
%14 = hal.tensor.export %13 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%pack_0 = tensor.pack %1 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%4 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack_1 = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %4 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
%5 = linalg.mmt4d ins(%pack, %pack_0 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%pack_1 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
%6 = tensor.empty() : tensor<256x256xi32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %6 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%7 = tensor.empty() : tensor<256xi32> | |
%8 = linalg.fill ins(%c0_i32 : i32) outs(%7 : tensor<256xi32>) -> tensor<256xi32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%11 = arith.muli %arg2, %arg3 : i32 | |
%12 = arith.muli %11, %c256_i32 : i32 | |
%13 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%unpack, %9, %arg3, %10, %arg2, %12 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%6 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_2: i32, %in_3: i32, %in_4: i32, %in_5: i32, %in_6: i32, %out: i32): | |
%15 = arith.muli %in_2, %in_3 : i32 | |
%16 = arith.subi %in, %15 : i32 | |
%17 = arith.muli %in_4, %in_5 : i32 | |
%18 = arith.subi %16, %17 : i32 | |
%19 = arith.addi %18, %in_6 : i32 | |
linalg.yield %19 : i32 | |
} -> tensor<256x256xi32> | |
%14 = hal.tensor.export %13 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%pack_0 = tensor.pack %1 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%4 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack_1 = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %4 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
%5 = linalg.mmt4d ins(%pack, %pack_0 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%pack_1 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
%6 = tensor.empty() : tensor<256x256xi32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %6 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%7 = tensor.empty() : tensor<256xi32> | |
%8 = linalg.fill ins(%c0_i32 : i32) outs(%7 : tensor<256xi32>) -> tensor<256xi32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%11 = arith.muli %arg2, %arg3 : i32 | |
%12 = arith.muli %11, %c256_i32 : i32 | |
%13 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%unpack, %9, %arg3, %10, %arg2, %12 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%6 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_2: i32, %in_3: i32, %in_4: i32, %in_5: i32, %in_6: i32, %out: i32): | |
%15 = arith.muli %in_2, %in_3 : i32 | |
%16 = arith.subi %in, %15 : i32 | |
%17 = arith.muli %in_4, %in_5 : i32 | |
%18 = arith.subi %16, %17 : i32 | |
%19 = arith.addi %18, %in_6 : i32 | |
linalg.yield %19 : i32 | |
} -> tensor<256x256xi32> | |
%14 = hal.tensor.export %13 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%pack_0 = tensor.pack %1 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%4 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack_1 = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %4 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
%5 = linalg.mmt4d ins(%pack, %pack_0 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%pack_1 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
%6 = tensor.empty() : tensor<256x256xi32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %6 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%7 = tensor.empty() : tensor<256xi32> | |
%8 = linalg.fill ins(%c0_i32 : i32) outs(%7 : tensor<256xi32>) -> tensor<256xi32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%11 = arith.muli %arg2, %arg3 : i32 | |
%12 = arith.muli %11, %c256_i32 : i32 | |
%13 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%unpack, %9, %arg3, %10, %arg2, %12 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%6 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_2: i32, %in_3: i32, %in_4: i32, %in_5: i32, %in_6: i32, %out: i32): | |
%15 = arith.muli %in_2, %in_3 : i32 | |
%16 = arith.subi %in, %15 : i32 | |
%17 = arith.muli %in_4, %in_5 : i32 | |
%18 = arith.subi %16, %17 : i32 | |
%19 = arith.addi %18, %in_6 : i32 | |
linalg.yield %19 : i32 | |
} -> tensor<256x256xi32> | |
%14 = hal.tensor.export %13 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-amx-fp8,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,-movrs,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,+evex512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-sha512,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,+gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,-nf,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-amx-transpose,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1) -> (d0, d1)> | |
#map1 = affine_map<(d0, d1) -> (d0)> | |
#map2 = affine_map<(d0, d1) -> (d1, d0)> | |
#map3 = affine_map<(d0, d1) -> ()> | |
#map4 = affine_map<(d0, d1) -> (d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%pack_0 = tensor.pack %1 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%4 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack_1 = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %4 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
%5 = linalg.mmt4d ins(%pack, %pack_0 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%pack_1 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
%6 = tensor.empty() : tensor<256x256xi32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %6 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%7 = tensor.empty() : tensor<256xi32> | |
%8 = linalg.fill ins(%c0_i32 : i32) outs(%7 : tensor<256xi32>) -> tensor<256xi32> | |
%9 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%10 = linalg.generic {indexing_maps = [#map2, #map1], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%11 = arith.muli %arg2, %arg3 : i32 | |
%12 = arith.muli %11, %c256_i32 : i32 | |
%13 = linalg.generic {indexing_maps = [#map, #map1, #map3, #map4, #map3, #map3, #map], iterator_types = ["parallel", "parallel"]} ins(%unpack, %9, %arg3, %10, %arg2, %12 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%6 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_2: i32, %in_3: i32, %in_4: i32, %in_5: i32, %in_6: i32, %out: i32): | |
%15 = arith.muli %in_2, %in_3 : i32 | |
%16 = arith.subi %in, %15 : i32 | |
%17 = arith.muli %in_4, %in_5 : i32 | |
%18 = arith.subi %16, %17 : i32 | |
%19 = arith.addi %18, %in_6 : i32 | |
linalg.yield %19 : i32 | |
} -> tensor<256x256xi32> | |
%14 = hal.tensor.export %13 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After IPO (iree-util-ipo) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-amx-fp8,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,-movrs,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,+evex512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-sha512,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,+gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,-nf,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-amx-transpose,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1) -> (d0, d1)> | |
#map1 = affine_map<(d0, d1) -> (d0)> | |
#map2 = affine_map<(d0, d1) -> (d1, d0)> | |
#map3 = affine_map<(d0, d1) -> ()> | |
#map4 = affine_map<(d0, d1) -> (d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%pack_0 = tensor.pack %1 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%4 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack_1 = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %4 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
%5 = linalg.mmt4d ins(%pack, %pack_0 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%pack_1 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
%6 = tensor.empty() : tensor<256x256xi32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %6 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%7 = tensor.empty() : tensor<256xi32> | |
%8 = linalg.fill ins(%c0_i32 : i32) outs(%7 : tensor<256xi32>) -> tensor<256xi32> | |
%9 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%10 = linalg.generic {indexing_maps = [#map2, #map1], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%11 = arith.muli %arg2, %arg3 : i32 | |
%12 = arith.muli %11, %c256_i32 : i32 | |
%13 = linalg.generic {indexing_maps = [#map, #map1, #map3, #map4, #map3, #map3, #map], iterator_types = ["parallel", "parallel"]} ins(%unpack, %9, %arg3, %10, %arg2, %12 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%6 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_2: i32, %in_3: i32, %in_4: i32, %in_5: i32, %in_6: i32, %out: i32): | |
%15 = arith.muli %in_2, %in_3 : i32 | |
%16 = arith.subi %in, %15 : i32 | |
%17 = arith.muli %in_4, %in_5 : i32 | |
%18 = arith.subi %16, %17 : i32 | |
%19 = arith.addi %18, %in_6 : i32 | |
linalg.yield %19 : i32 | |
} -> tensor<256x256xi32> | |
%14 = hal.tensor.export %13 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
} | |
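// Annotation (not compiler output): the alternation between dumps that begin with the #map /
// #executable_target aliases and dumps that show only the util.func reflects pass scoping, not an
// IR change. Module-scoped passes (FoldGlobals, IPO, HoistIntoGlobals, JitGlobals) print the whole
// module with its attribute aliases, while function-scoped passes print just the function with the
// affine maps inlined; the function body itself is unchanged across this stretch of the pipeline.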
// -----// IR Dump After OptimizeIntArithmetic (iree-util-optimize-int-arithmetic) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%pack_0 = tensor.pack %1 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%4 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack_1 = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %4 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
%5 = linalg.mmt4d ins(%pack, %pack_0 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%pack_1 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
%6 = tensor.empty() : tensor<256x256xi32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %6 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%7 = tensor.empty() : tensor<256xi32> | |
%8 = linalg.fill ins(%c0_i32 : i32) outs(%7 : tensor<256xi32>) -> tensor<256xi32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%11 = arith.muli %arg2, %arg3 : i32 | |
%12 = arith.muli %11, %c256_i32 : i32 | |
%13 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%unpack, %9, %arg3, %10, %arg2, %12 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%6 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_2: i32, %in_3: i32, %in_4: i32, %in_5: i32, %in_6: i32, %out: i32): | |
%15 = arith.muli %in_2, %in_3 : i32 | |
%16 = arith.subi %in, %15 : i32 | |
%17 = arith.muli %in_4, %in_5 : i32 | |
%18 = arith.subi %16, %17 : i32 | |
%19 = arith.addi %18, %in_6 : i32 | |
linalg.yield %19 : i32 | |
} -> tensor<256x256xi32> | |
%14 = hal.tensor.export %13 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%pack_0 = tensor.pack %1 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%4 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack_1 = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %4 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
%5 = linalg.mmt4d ins(%pack, %pack_0 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%pack_1 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
%6 = tensor.empty() : tensor<256x256xi32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %6 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%7 = tensor.empty() : tensor<256xi32> | |
%8 = linalg.fill ins(%c0_i32 : i32) outs(%7 : tensor<256xi32>) -> tensor<256xi32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%11 = arith.muli %arg2, %arg3 : i32 | |
%12 = arith.muli %11, %c256_i32 : i32 | |
%13 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%unpack, %9, %arg3, %10, %arg2, %12 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%6 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_2: i32, %in_3: i32, %in_4: i32, %in_5: i32, %in_6: i32, %out: i32): | |
%15 = arith.muli %in_2, %in_3 : i32 | |
%16 = arith.subi %in, %15 : i32 | |
%17 = arith.muli %in_4, %in_5 : i32 | |
%18 = arith.subi %16, %17 : i32 | |
%19 = arith.addi %18, %in_6 : i32 | |
linalg.yield %19 : i32 | |
} -> tensor<256x256xi32> | |
%14 = hal.tensor.export %13 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%pack_0 = tensor.pack %1 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%4 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack_1 = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %4 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
%5 = linalg.mmt4d ins(%pack, %pack_0 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%pack_1 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
%6 = tensor.empty() : tensor<256x256xi32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %6 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%7 = tensor.empty() : tensor<256xi32> | |
%8 = linalg.fill ins(%c0_i32 : i32) outs(%7 : tensor<256xi32>) -> tensor<256xi32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%11 = arith.muli %arg2, %arg3 : i32 | |
%12 = arith.muli %11, %c256_i32 : i32 | |
%13 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%unpack, %9, %arg3, %10, %arg2, %12 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%6 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_2: i32, %in_3: i32, %in_4: i32, %in_5: i32, %in_6: i32, %out: i32): | |
%15 = arith.muli %in_2, %in_3 : i32 | |
%16 = arith.subi %in, %15 : i32 | |
%17 = arith.muli %in_4, %in_5 : i32 | |
%18 = arith.subi %16, %17 : i32 | |
%19 = arith.addi %18, %in_6 : i32 | |
linalg.yield %19 : i32 | |
} -> tensor<256x256xi32> | |
%14 = hal.tensor.export %13 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
// -----// IR Dump After HoistIntoGlobals (iree-util-hoist-into-globals) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-amx-fp8,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,-movrs,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,+evex512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-sha512,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,+gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,-nf,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-amx-transpose,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1) -> (d0, d1)> | |
#map1 = affine_map<(d0, d1) -> (d0)> | |
#map2 = affine_map<(d0, d1) -> (d1, d0)> | |
#map3 = affine_map<(d0, d1) -> ()> | |
#map4 = affine_map<(d0, d1) -> (d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%pack_0 = tensor.pack %1 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%4 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack_1 = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %4 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
%5 = linalg.mmt4d ins(%pack, %pack_0 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%pack_1 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
%6 = tensor.empty() : tensor<256x256xi32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %6 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%7 = tensor.empty() : tensor<256xi32> | |
%8 = linalg.fill ins(%c0_i32 : i32) outs(%7 : tensor<256xi32>) -> tensor<256xi32> | |
%9 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%10 = linalg.generic {indexing_maps = [#map2, #map1], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%11 = arith.muli %arg2, %arg3 : i32 | |
%12 = arith.muli %11, %c256_i32 : i32 | |
%13 = linalg.generic {indexing_maps = [#map, #map1, #map3, #map4, #map3, #map3, #map], iterator_types = ["parallel", "parallel"]} ins(%unpack, %9, %arg3, %10, %arg2, %12 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%6 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_2: i32, %in_3: i32, %in_4: i32, %in_5: i32, %in_6: i32, %out: i32): | |
%15 = arith.muli %in_2, %in_3 : i32 | |
%16 = arith.subi %in, %15 : i32 | |
%17 = arith.muli %in_4, %in_5 : i32 | |
%18 = arith.subi %16, %17 : i32 | |
%19 = arith.addi %18, %in_6 : i32 | |
linalg.yield %19 : i32 | |
} -> tensor<256x256xi32> | |
%14 = hal.tensor.export %13 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After JitGlobalsPass (iree-consteval-jit-globals) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-amx-fp8,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,-movrs,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,+evex512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-sha512,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,+gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,-nf,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-amx-transpose,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1) -> (d0, d1)> | |
#map1 = affine_map<(d0, d1) -> (d0)> | |
#map2 = affine_map<(d0, d1) -> (d1, d0)> | |
#map3 = affine_map<(d0, d1) -> ()> | |
#map4 = affine_map<(d0, d1) -> (d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%pack_0 = tensor.pack %1 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%4 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack_1 = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %4 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
%5 = linalg.mmt4d ins(%pack, %pack_0 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%pack_1 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
%6 = tensor.empty() : tensor<256x256xi32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %6 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%7 = tensor.empty() : tensor<256xi32> | |
%8 = linalg.fill ins(%c0_i32 : i32) outs(%7 : tensor<256xi32>) -> tensor<256xi32> | |
%9 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%10 = linalg.generic {indexing_maps = [#map2, #map1], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%11 = arith.muli %arg2, %arg3 : i32 | |
%12 = arith.muli %11, %c256_i32 : i32 | |
%13 = linalg.generic {indexing_maps = [#map, #map1, #map3, #map4, #map3, #map3, #map], iterator_types = ["parallel", "parallel"]} ins(%unpack, %9, %arg3, %10, %arg2, %12 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%6 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_2: i32, %in_3: i32, %in_4: i32, %in_5: i32, %in_6: i32, %out: i32): | |
%15 = arith.muli %in_2, %in_3 : i32 | |
%16 = arith.subi %in, %15 : i32 | |
%17 = arith.muli %in_4, %in_5 : i32 | |
%18 = arith.subi %16, %17 : i32 | |
%19 = arith.addi %18, %in_6 : i32 | |
linalg.yield %19 : i32 | |
} -> tensor<256x256xi32> | |
%14 = hal.tensor.export %13 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
} | |
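// Note (reading of the dump above): the original linalg.quantized_matmul has been
// rewritten into a data-tiled form by the earlier passes:
//   - %pack / %pack_0 lay out the two i8 operands as 16x128x16x2 tiles and %pack_1
//     lays out the i32 accumulator as 16x16x16x16 tiles,
//   - linalg.mmt4d performs the tiled integer matmul on the packed operands,
//   - tensor.unpack restores the 256x256 row-major layout,
//   - the two reduction linalg.generics (%9, %10) compute per-row sums of input0 and
//     per-column sums of input1, which the final elementwise generic (%13) combines
//     with the zero points (%arg2, %arg3) to correct the result before export.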
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%pack_0 = tensor.pack %1 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%4 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack_1 = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %4 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
%5 = linalg.mmt4d ins(%pack, %pack_0 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%pack_1 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
%6 = tensor.empty() : tensor<256x256xi32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %6 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%7 = tensor.empty() : tensor<256xi32> | |
%8 = linalg.fill ins(%c0_i32 : i32) outs(%7 : tensor<256xi32>) -> tensor<256xi32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%11 = arith.muli %arg2, %arg3 : i32 | |
%12 = arith.muli %11, %c256_i32 : i32 | |
%13 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%unpack, %9, %arg3, %10, %arg2, %12 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%6 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_2: i32, %in_3: i32, %in_4: i32, %in_5: i32, %in_6: i32, %out: i32): | |
%15 = arith.muli %in_2, %in_3 : i32 | |
%16 = arith.subi %in, %15 : i32 | |
%17 = arith.muli %in_4, %in_5 : i32 | |
%18 = arith.subi %16, %17 : i32 | |
%19 = arith.addi %18, %in_6 : i32 | |
linalg.yield %19 : i32 | |
} -> tensor<256x256xi32> | |
%14 = hal.tensor.export %13 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%pack_0 = tensor.pack %1 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%4 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack_1 = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %4 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
%5 = linalg.mmt4d ins(%pack, %pack_0 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%pack_1 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
%6 = tensor.empty() : tensor<256x256xi32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %6 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%7 = tensor.empty() : tensor<256xi32> | |
%8 = linalg.fill ins(%c0_i32 : i32) outs(%7 : tensor<256xi32>) -> tensor<256xi32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%11 = arith.muli %arg2, %arg3 : i32 | |
%12 = arith.muli %11, %c256_i32 : i32 | |
%13 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%unpack, %9, %arg3, %10, %arg2, %12 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%6 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_2: i32, %in_3: i32, %in_4: i32, %in_5: i32, %in_6: i32, %out: i32): | |
%15 = arith.muli %in_2, %in_3 : i32 | |
%16 = arith.subi %in, %15 : i32 | |
%17 = arith.muli %in_4, %in_5 : i32 | |
%18 = arith.subi %16, %17 : i32 | |
%19 = arith.addi %18, %in_6 : i32 | |
linalg.yield %19 : i32 | |
} -> tensor<256x256xi32> | |
%14 = hal.tensor.export %13 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
// -----// IR Dump After RaiseSpecialOpsPass (iree-global-opt-raise-special-ops) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%pack_0 = tensor.pack %1 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%4 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack_1 = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %4 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
%5 = linalg.mmt4d ins(%pack, %pack_0 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%pack_1 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
%6 = tensor.empty() : tensor<256x256xi32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %6 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%7 = tensor.empty() : tensor<256xi32> | |
%8 = linalg.fill ins(%c0_i32 : i32) outs(%7 : tensor<256xi32>) -> tensor<256xi32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%11 = arith.muli %arg2, %arg3 : i32 | |
%12 = arith.muli %11, %c256_i32 : i32 | |
%13 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%unpack, %9, %arg3, %10, %arg2, %12 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%6 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_2: i32, %in_3: i32, %in_4: i32, %in_5: i32, %in_6: i32, %out: i32): | |
%15 = arith.muli %in_2, %in_3 : i32 | |
%16 = arith.subi %in, %15 : i32 | |
%17 = arith.muli %in_4, %in_5 : i32 | |
%18 = arith.subi %16, %17 : i32 | |
%19 = arith.addi %18, %in_6 : i32 | |
linalg.yield %19 : i32 | |
} -> tensor<256x256xi32> | |
%14 = hal.tensor.export %13 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
// -----// IR Dump After InjectTensorTracingPass (iree-flow-inject-tensor-tracing) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%pack_0 = tensor.pack %1 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%4 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack_1 = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %4 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
%5 = linalg.mmt4d ins(%pack, %pack_0 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%pack_1 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
%6 = tensor.empty() : tensor<256x256xi32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %6 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%7 = tensor.empty() : tensor<256xi32> | |
%8 = linalg.fill ins(%c0_i32 : i32) outs(%7 : tensor<256xi32>) -> tensor<256xi32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%11 = arith.muli %arg2, %arg3 : i32 | |
%12 = arith.muli %11, %c256_i32 : i32 | |
%13 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%unpack, %9, %arg3, %10, %arg2, %12 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%6 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_2: i32, %in_3: i32, %in_4: i32, %in_5: i32, %in_6: i32, %out: i32): | |
%15 = arith.muli %in_2, %in_3 : i32 | |
%16 = arith.subi %in, %15 : i32 | |
%17 = arith.muli %in_4, %in_5 : i32 | |
%18 = arith.subi %16, %17 : i32 | |
%19 = arith.addi %18, %in_6 : i32 | |
linalg.yield %19 : i32 | |
} -> tensor<256x256xi32> | |
%14 = hal.tensor.export %13 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
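// Note: the epilogue in %13 is the standard zero-point correction for an
// i8 x i8 -> i32 quantized matmul. Writing A = input0, B = input1, zp_A = %arg2,
// zp_B = %arg3 and K = 256, the value produced per element is
//   C[i, j] = sum_k A[i, k] * B[k, j]
//             - zp_B * rowsum_A[i]      (%15/%16, with %9 = row sums of input0)
//             - zp_A * colsum_B[j]      (%17/%18, with %10 = column sums of input1)
//             + K * zp_A * zp_B         (%19, with %12 = %arg2 * %arg3 * 256)
// which equals sum_k (A[i, k] - zp_A) * (B[k, j] - zp_B), i.e. the matmul of the
// zero-point-adjusted operands.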
// -----// IR Dump After TensorPadToTensorInsertSlicePass (iree-dispatch-creation-tensor-pad-to-tensor-insert-slice) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-amx-fp8,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,-movrs,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,+evex512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-sha512,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,+gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,-nf,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-amx-transpose,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1) -> (d0, d1)> | |
#map1 = affine_map<(d0, d1) -> (d0)> | |
#map2 = affine_map<(d0, d1) -> (d1, d0)> | |
#map3 = affine_map<(d0, d1) -> ()> | |
#map4 = affine_map<(d0, d1) -> (d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%pack_0 = tensor.pack %1 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%4 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack_1 = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %4 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
%5 = linalg.mmt4d ins(%pack, %pack_0 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%pack_1 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
%6 = tensor.empty() : tensor<256x256xi32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %6 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%7 = tensor.empty() : tensor<256xi32> | |
%8 = linalg.fill ins(%c0_i32 : i32) outs(%7 : tensor<256xi32>) -> tensor<256xi32> | |
%9 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%10 = linalg.generic {indexing_maps = [#map2, #map1], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%11 = arith.muli %arg2, %arg3 : i32 | |
%12 = arith.muli %11, %c256_i32 : i32 | |
%13 = linalg.generic {indexing_maps = [#map, #map1, #map3, #map4, #map3, #map3, #map], iterator_types = ["parallel", "parallel"]} ins(%unpack, %9, %arg3, %10, %arg2, %12 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%6 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_2: i32, %in_3: i32, %in_4: i32, %in_5: i32, %in_6: i32, %out: i32): | |
%15 = arith.muli %in_2, %in_3 : i32 | |
%16 = arith.subi %in, %15 : i32 | |
%17 = arith.muli %in_4, %in_5 : i32 | |
%18 = arith.subi %16, %17 : i32 | |
%19 = arith.addi %18, %in_6 : i32 | |
linalg.yield %19 : i32 | |
} -> tensor<256x256xi32> | |
%14 = hal.tensor.export %13 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%pack_0 = tensor.pack %1 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%4 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack_1 = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %4 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
%5 = linalg.mmt4d ins(%pack, %pack_0 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%pack_1 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
%6 = tensor.empty() : tensor<256x256xi32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %6 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%7 = tensor.empty() : tensor<256xi32> | |
%8 = linalg.fill ins(%c0_i32 : i32) outs(%7 : tensor<256xi32>) -> tensor<256xi32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%11 = arith.muli %arg2, %arg3 : i32 | |
%12 = arith.muli %11, %c256_i32 : i32 | |
%13 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%unpack, %9, %arg3, %10, %arg2, %12 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%6 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_2: i32, %in_3: i32, %in_4: i32, %in_5: i32, %in_6: i32, %out: i32): | |
%15 = arith.muli %in_2, %in_3 : i32 | |
%16 = arith.subi %in, %15 : i32 | |
%17 = arith.muli %in_4, %in_5 : i32 | |
%18 = arith.subi %16, %17 : i32 | |
%19 = arith.addi %18, %in_6 : i32 | |
linalg.yield %19 : i32 | |
} -> tensor<256x256xi32> | |
%14 = hal.tensor.export %13 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%pack_0 = tensor.pack %1 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%4 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack_1 = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %4 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
%5 = linalg.mmt4d ins(%pack, %pack_0 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%pack_1 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
%6 = tensor.empty() : tensor<256x256xi32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %6 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%7 = tensor.empty() : tensor<256xi32> | |
%8 = linalg.fill ins(%c0_i32 : i32) outs(%7 : tensor<256xi32>) -> tensor<256xi32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%11 = arith.muli %arg2, %arg3 : i32 | |
%12 = arith.muli %11, %c256_i32 : i32 | |
%13 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%unpack, %9, %arg3, %10, %arg2, %12 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%6 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_2: i32, %in_3: i32, %in_4: i32, %in_5: i32, %in_6: i32, %out: i32): | |
%15 = arith.muli %in_2, %in_3 : i32 | |
%16 = arith.subi %in, %15 : i32 | |
%17 = arith.muli %in_4, %in_5 : i32 | |
%18 = arith.subi %16, %17 : i32 | |
%19 = arith.addi %18, %in_6 : i32 | |
linalg.yield %19 : i32 | |
} -> tensor<256x256xi32> | |
%14 = hal.tensor.export %13 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%pack_0 = tensor.pack %1 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%4 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack_1 = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %4 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
%5 = linalg.mmt4d ins(%pack, %pack_0 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%pack_1 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
%6 = tensor.empty() : tensor<256x256xi32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %6 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%7 = tensor.empty() : tensor<256xi32> | |
%8 = linalg.fill ins(%c0_i32 : i32) outs(%7 : tensor<256xi32>) -> tensor<256xi32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%11 = arith.muli %arg2, %arg3 : i32 | |
%12 = arith.muli %11, %c256_i32 : i32 | |
%13 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%unpack, %9, %arg3, %10, %arg2, %12 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%6 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_2: i32, %in_3: i32, %in_4: i32, %in_5: i32, %in_6: i32, %out: i32): | |
%15 = arith.muli %in_2, %in_3 : i32 | |
%16 = arith.subi %in, %15 : i32 | |
%17 = arith.muli %in_4, %in_5 : i32 | |
%18 = arith.subi %16, %17 : i32 | |
%19 = arith.addi %18, %in_6 : i32 | |
linalg.yield %19 : i32 | |
} -> tensor<256x256xi32> | |
%14 = hal.tensor.export %13 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%pack_0 = tensor.pack %1 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%4 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack_1 = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %4 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
%5 = linalg.mmt4d ins(%pack, %pack_0 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%pack_1 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
%6 = tensor.empty() : tensor<256x256xi32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %6 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%7 = tensor.empty() : tensor<256xi32> | |
%8 = linalg.fill ins(%c0_i32 : i32) outs(%7 : tensor<256xi32>) -> tensor<256xi32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%11 = arith.muli %arg2, %arg3 : i32 | |
%12 = arith.muli %11, %c256_i32 : i32 | |
%13 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%unpack, %9, %arg3, %10, %arg2, %12 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%6 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_2: i32, %in_3: i32, %in_4: i32, %in_5: i32, %in_6: i32, %out: i32): | |
%15 = arith.muli %in_2, %in_3 : i32 | |
%16 = arith.subi %in, %15 : i32 | |
%17 = arith.muli %in_4, %in_5 : i32 | |
%18 = arith.subi %16, %17 : i32 | |
%19 = arith.addi %18, %in_6 : i32 | |
linalg.yield %19 : i32 | |
} -> tensor<256x256xi32> | |
%14 = hal.tensor.export %13 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-amx-fp8,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,-movrs,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,+evex512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-sha512,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,+gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,-nf,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-amx-transpose,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1) -> (d0, d1)> | |
#map1 = affine_map<(d0, d1) -> (d0)> | |
#map2 = affine_map<(d0, d1) -> (d1, d0)> | |
#map3 = affine_map<(d0, d1) -> ()> | |
#map4 = affine_map<(d0, d1) -> (d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device | |
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%pack_0 = tensor.pack %1 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%4 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack_1 = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %4 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
%5 = linalg.mmt4d ins(%pack, %pack_0 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%pack_1 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
%6 = tensor.empty() : tensor<256x256xi32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %6 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%7 = tensor.empty() : tensor<256xi32> | |
%8 = linalg.fill ins(%c0_i32 : i32) outs(%7 : tensor<256xi32>) -> tensor<256xi32> | |
%9 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%10 = linalg.generic {indexing_maps = [#map2, #map1], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%11 = arith.muli %arg2, %arg3 : i32 | |
%12 = arith.muli %11, %c256_i32 : i32 | |
%13 = linalg.generic {indexing_maps = [#map, #map1, #map3, #map4, #map3, #map3, #map], iterator_types = ["parallel", "parallel"]} ins(%unpack, %9, %arg3, %10, %arg2, %12 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%6 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_2: i32, %in_3: i32, %in_4: i32, %in_5: i32, %in_6: i32, %out: i32): | |
%15 = arith.muli %in_2, %in_3 : i32 | |
%16 = arith.subi %in, %15 : i32 | |
%17 = arith.muli %in_4, %in_5 : i32 | |
%18 = arith.subi %16, %17 : i32 | |
%19 = arith.addi %18, %in_6 : i32 | |
linalg.yield %19 : i32 | |
} -> tensor<256x256xi32> | |
%14 = hal.tensor.export %13 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-amx-fp8,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,-movrs,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,+evex512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-sha512,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,+gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,-nf,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-amx-transpose,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1) -> (d0, d1)> | |
#map1 = affine_map<(d0, d1) -> (d0)> | |
#map2 = affine_map<(d0, d1) -> (d1, d0)> | |
#map3 = affine_map<(d0, d1) -> ()> | |
#map4 = affine_map<(d0, d1) -> (d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device | |
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%pack_0 = tensor.pack %1 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%4 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack_1 = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %4 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
%5 = linalg.mmt4d ins(%pack, %pack_0 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%pack_1 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
%6 = tensor.empty() : tensor<256x256xi32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %6 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%7 = tensor.empty() : tensor<256xi32> | |
%8 = linalg.fill ins(%c0_i32 : i32) outs(%7 : tensor<256xi32>) -> tensor<256xi32> | |
%9 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%10 = linalg.generic {indexing_maps = [#map2, #map1], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%11 = arith.muli %arg2, %arg3 : i32 | |
%12 = arith.muli %11, %c256_i32 : i32 | |
%13 = linalg.generic {indexing_maps = [#map, #map1, #map3, #map4, #map3, #map3, #map], iterator_types = ["parallel", "parallel"]} ins(%unpack, %9, %arg3, %10, %arg2, %12 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%6 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_2: i32, %in_3: i32, %in_4: i32, %in_5: i32, %in_6: i32, %out: i32): | |
%15 = arith.muli %in_2, %in_3 : i32 | |
%16 = arith.subi %in, %15 : i32 | |
%17 = arith.muli %in_4, %in_5 : i32 | |
%18 = arith.subi %16, %17 : i32 | |
%19 = arith.addi %18, %in_6 : i32 | |
linalg.yield %19 : i32 | |
} -> tensor<256x256xi32> | |
%14 = hal.tensor.export %13 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After IPO (iree-util-ipo) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-amx-fp8,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,-movrs,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,+evex512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-sha512,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,+gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,-nf,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-amx-transpose,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1) -> (d0, d1)> | |
#map1 = affine_map<(d0, d1) -> (d0)> | |
#map2 = affine_map<(d0, d1) -> (d1, d0)> | |
#map3 = affine_map<(d0, d1) -> ()> | |
#map4 = affine_map<(d0, d1) -> (d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device | |
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%pack_0 = tensor.pack %1 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%4 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack_1 = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %4 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
%5 = linalg.mmt4d ins(%pack, %pack_0 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%pack_1 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
%6 = tensor.empty() : tensor<256x256xi32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %6 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%7 = tensor.empty() : tensor<256xi32> | |
%8 = linalg.fill ins(%c0_i32 : i32) outs(%7 : tensor<256xi32>) -> tensor<256xi32> | |
%9 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%10 = linalg.generic {indexing_maps = [#map2, #map1], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%11 = arith.muli %arg2, %arg3 : i32 | |
%12 = arith.muli %11, %c256_i32 : i32 | |
%13 = linalg.generic {indexing_maps = [#map, #map1, #map3, #map4, #map3, #map3, #map], iterator_types = ["parallel", "parallel"]} ins(%unpack, %9, %arg3, %10, %arg2, %12 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%6 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_2: i32, %in_3: i32, %in_4: i32, %in_5: i32, %in_6: i32, %out: i32): | |
%15 = arith.muli %in_2, %in_3 : i32 | |
%16 = arith.subi %in, %15 : i32 | |
%17 = arith.muli %in_4, %in_5 : i32 | |
%18 = arith.subi %16, %17 : i32 | |
%19 = arith.addi %18, %in_6 : i32 | |
linalg.yield %19 : i32 | |
} -> tensor<256x256xi32> | |
%14 = hal.tensor.export %13 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
} | |
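// Note: the iree.fixedpoint.iteration = 0 module attribute seen in the FoldGlobals,
// FuseGlobals and IPO dumps appears to be bookkeeping for the fixed-point driver that
// re-runs these global optimization passes until the IR stops changing; it is dropped
// again in the FixedPointIterator dump below once the iteration has converged.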
// -----// IR Dump After FixedPointIterator (iree-util-fixed-point-iterator) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-amx-fp8,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,-movrs,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,+evex512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-sha512,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,+gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,-nf,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-amx-transpose,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1) -> (d0, d1)> | |
#map1 = affine_map<(d0, d1) -> (d0)> | |
#map2 = affine_map<(d0, d1) -> (d1, d0)> | |
#map3 = affine_map<(d0, d1) -> ()> | |
#map4 = affine_map<(d0, d1) -> (d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%pack_0 = tensor.pack %1 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%4 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack_1 = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %4 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
%5 = linalg.mmt4d ins(%pack, %pack_0 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%pack_1 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
%6 = tensor.empty() : tensor<256x256xi32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %6 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%7 = tensor.empty() : tensor<256xi32> | |
%8 = linalg.fill ins(%c0_i32 : i32) outs(%7 : tensor<256xi32>) -> tensor<256xi32> | |
%9 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%10 = linalg.generic {indexing_maps = [#map2, #map1], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%11 = arith.muli %arg2, %arg3 : i32 | |
%12 = arith.muli %11, %c256_i32 : i32 | |
%13 = linalg.generic {indexing_maps = [#map, #map1, #map3, #map4, #map3, #map3, #map], iterator_types = ["parallel", "parallel"]} ins(%unpack, %9, %arg3, %10, %arg2, %12 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%6 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_2: i32, %in_3: i32, %in_4: i32, %in_5: i32, %in_6: i32, %out: i32): | |
%15 = arith.muli %in_2, %in_3 : i32 | |
%16 = arith.subi %in, %15 : i32 | |
%17 = arith.muli %in_4, %in_5 : i32 | |
%18 = arith.subi %16, %17 : i32 | |
%19 = arith.addi %18, %in_6 : i32 | |
linalg.yield %19 : i32 | |
} -> tensor<256x256xi32> | |
%14 = hal.tensor.export %13 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
} | |
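// Note on the tile sizes visible above: the i8 operands are packed with
// inner_tiles = [16, 2] (16 rows/columns by 2 reduction elements per tile) and the
// i32 accumulator with inner_tiles = [16, 16], i.e. an mmt4d microkernel shape of
// M0 = N0 = 16, K0 = 2. Pairing two i8 values along the reduction dimension is
// consistent with microkernels that widen i8 to i16 and use two-element dot-product
// instructions on this AVX-512 (znver4, +avx512vnni) target, though the microkernel
// selection itself is not shown in these dumps.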
// -----// IR Dump After FusionPreprocessingPass (iree-dispatch-creation-fusion-preprocessing) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%pack_0 = tensor.pack %1 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%4 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack_1 = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %4 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
%5 = linalg.mmt4d ins(%pack, %pack_0 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%pack_1 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
%6 = tensor.empty() : tensor<256x256xi32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %6 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%7 = tensor.empty() : tensor<256xi32> | |
%8 = linalg.fill ins(%c0_i32 : i32) outs(%7 : tensor<256xi32>) -> tensor<256xi32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%11 = arith.muli %arg2, %arg3 : i32 | |
%12 = arith.muli %11, %c256_i32 : i32 | |
%13 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%unpack, %9, %arg3, %10, %arg2, %12 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%6 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_2: i32, %in_3: i32, %in_4: i32, %in_5: i32, %in_6: i32, %out: i32): | |
%15 = arith.muli %in_2, %in_3 : i32 | |
%16 = arith.subi %in, %15 : i32 | |
%17 = arith.muli %in_4, %in_5 : i32 | |
%18 = arith.subi %16, %17 : i32 | |
%19 = arith.addi %18, %in_6 : i32 | |
linalg.yield %19 : i32 | |
} -> tensor<256x256xi32> | |
%14 = hal.tensor.export %13 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
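// -----// Note: tensor.pack / linalg.mmt4d layout //----- //
// The pack ops block the 256x256 operands for the mmt4d kernel: input0 becomes
// 16x128x16x2 (M/16 x K/2 x 16 x 2), input1 becomes 16x128x16x2 (N/16 x K/2 x 16 x 2,
// a tiled transpose), and the i32 accumulator becomes 16x16x16x16. The tile sizes are
// whatever the earlier data-tiling/materialization step chose for this x86-64 target,
// not something introduced by the passes in this excerpt. A rough NumPy emulation,
// for intuition only (names are ours, not IREE's):

import numpy as np

def pack_lhs(a, m0=16, k0=2):
    # tensor.pack %0: outer_dims_perm = [0, 1], inner_dims_pos = [0, 1], inner_tiles = [16, 2]
    m, k = a.shape
    return a.reshape(m // m0, m0, k // k0, k0).transpose(0, 2, 1, 3)

def pack_rhs(b, n0=16, k0=2):
    # tensor.pack %1: outer_dims_perm = [1, 0], inner_dims_pos = [1, 0], inner_tiles = [16, 2]
    k, n = b.shape
    return b.reshape(k // k0, k0, n // n0, n0).transpose(2, 0, 3, 1)

def mmt4d(lhs4, rhs4, acc4):
    # linalg.mmt4d: acc[m, n, m0, n0] += sum over k, k0 of lhs[m, k, m0, k0] * rhs[n, k, n0, k0]
    return acc4 + np.einsum('mkac,nkbc->mnab',
                            lhs4.astype(np.int32), rhs4.astype(np.int32))

rng = np.random.default_rng(1)
a = rng.integers(-128, 128, (256, 256), dtype=np.int8)
b = rng.integers(-128, 128, (256, 256), dtype=np.int8)
c4 = mmt4d(pack_lhs(a), pack_rhs(b), np.zeros((16, 16, 16, 16), dtype=np.int32))
c = c4.transpose(0, 2, 1, 3).reshape(256, 256)   # tensor.unpack with inner_tiles = [16, 16]
assert np.array_equal(c, a.astype(np.int32) @ b.astype(np.int32))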
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%pack_0 = tensor.pack %1 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%4 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack_1 = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %4 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
%5 = linalg.mmt4d ins(%pack, %pack_0 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%pack_1 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
%6 = tensor.empty() : tensor<256x256xi32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %6 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%7 = tensor.empty() : tensor<256xi32> | |
%8 = linalg.fill ins(%c0_i32 : i32) outs(%7 : tensor<256xi32>) -> tensor<256xi32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%11 = arith.muli %arg2, %arg3 : i32 | |
%12 = arith.muli %11, %c256_i32 : i32 | |
%13 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%unpack, %9, %arg3, %10, %arg2, %12 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%6 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_2: i32, %in_3: i32, %in_4: i32, %in_5: i32, %in_6: i32, %out: i32): | |
%15 = arith.muli %in_2, %in_3 : i32 | |
%16 = arith.subi %in, %15 : i32 | |
%17 = arith.muli %in_4, %in_5 : i32 | |
%18 = arith.subi %16, %17 : i32 | |
%19 = arith.addi %18, %in_6 : i32 | |
linalg.yield %19 : i32 | |
} -> tensor<256x256xi32> | |
%14 = hal.tensor.export %13 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
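// Note: the canonicalizer finds nothing to fold in this function, so the dump above is
// identical to the preceding FusionPreprocessingPass output.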
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%pack_0 = tensor.pack %1 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%4 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack_1 = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %4 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
%5 = linalg.mmt4d ins(%pack, %pack_0 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%pack_1 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
%6 = tensor.empty() : tensor<256x256xi32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %6 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%7 = tensor.empty() : tensor<256xi32> | |
%8 = linalg.fill ins(%c0_i32 : i32) outs(%7 : tensor<256xi32>) -> tensor<256xi32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%11 = arith.muli %arg2, %arg3 : i32 | |
%12 = arith.muli %11, %c256_i32 : i32 | |
%13 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%unpack, %9, %arg3, %10, %arg2, %12 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%6 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_2: i32, %in_3: i32, %in_4: i32, %in_5: i32, %in_6: i32, %out: i32): | |
%15 = arith.muli %in_2, %in_3 : i32 | |
%16 = arith.subi %in, %15 : i32 | |
%17 = arith.muli %in_4, %in_5 : i32 | |
%18 = arith.subi %16, %17 : i32 | |
%19 = arith.addi %18, %in_6 : i32 | |
linalg.yield %19 : i32 | |
} -> tensor<256x256xi32> | |
%14 = hal.tensor.export %13 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
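// Note: CSE likewise changes nothing; the two reduction generics already share the single
// zero-filled init tensor %8, and there are no other redundant computations to merge.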
// -----// IR Dump After ElementwiseOpFusionPass (iree-dispatch-creation-elementwise-op-fusion) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%pack_0 = tensor.pack %1 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%4 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack_1 = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %4 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
%5 = linalg.mmt4d ins(%pack, %pack_0 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%pack_1 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
%6 = tensor.empty() : tensor<256x256xi32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %6 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%7 = tensor.empty() : tensor<256xi32> | |
%8 = linalg.fill ins(%c0_i32 : i32) outs(%7 : tensor<256xi32>) -> tensor<256xi32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%11 = arith.muli %arg2, %arg3 : i32 | |
%12 = arith.muli %11, %c256_i32 : i32 | |
%13 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%unpack, %9, %arg3, %10, %arg2, %12 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%6 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_2: i32, %in_3: i32, %in_4: i32, %in_5: i32, %in_6: i32, %out: i32): | |
%15 = arith.muli %in_2, %in_3 : i32 | |
%16 = arith.subi %in, %15 : i32 | |
%17 = arith.muli %in_4, %in_5 : i32 | |
%18 = arith.subi %16, %17 : i32 | |
%19 = arith.addi %18, %in_6 : i32 | |
linalg.yield %19 : i32 | |
} -> tensor<256x256xi32> | |
%14 = hal.tensor.export %13 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
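// Note: elementwise-op fusion has nothing to merge here either: %9 and %10 are reductions
// rather than elementwise producers, so the consumer generic %13 is left as is and the
// function passes through this phase unchanged.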
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%pack_0 = tensor.pack %1 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%4 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack_1 = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %4 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
%5 = linalg.mmt4d ins(%pack, %pack_0 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%pack_1 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
%6 = tensor.empty() : tensor<256x256xi32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %6 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%7 = tensor.empty() : tensor<256xi32> | |
%8 = linalg.fill ins(%c0_i32 : i32) outs(%7 : tensor<256xi32>) -> tensor<256xi32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%11 = arith.muli %arg2, %arg3 : i32 | |
%12 = arith.muli %11, %c256_i32 : i32 | |
%13 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%unpack, %9, %arg3, %10, %arg2, %12 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%6 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_2: i32, %in_3: i32, %in_4: i32, %in_5: i32, %in_6: i32, %out: i32): | |
%15 = arith.muli %in_2, %in_3 : i32 | |
%16 = arith.subi %in, %15 : i32 | |
%17 = arith.muli %in_4, %in_5 : i32 | |
%18 = arith.subi %16, %17 : i32 | |
%19 = arith.addi %18, %in_6 : i32 | |
linalg.yield %19 : i32 | |
} -> tensor<256x256xi32> | |
%14 = hal.tensor.export %13 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%pack_0 = tensor.pack %1 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%4 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack_1 = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %4 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
%5 = linalg.mmt4d ins(%pack, %pack_0 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%pack_1 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
%6 = tensor.empty() : tensor<256x256xi32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %6 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%7 = tensor.empty() : tensor<256xi32> | |
%8 = linalg.fill ins(%c0_i32 : i32) outs(%7 : tensor<256xi32>) -> tensor<256xi32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%11 = arith.muli %arg2, %arg3 : i32 | |
%12 = arith.muli %11, %c256_i32 : i32 | |
%13 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%unpack, %9, %arg3, %10, %arg2, %12 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%6 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_2: i32, %in_3: i32, %in_4: i32, %in_5: i32, %in_6: i32, %out: i32): | |
%15 = arith.muli %in_2, %in_3 : i32 | |
%16 = arith.subi %in, %15 : i32 | |
%17 = arith.muli %in_4, %in_5 : i32 | |
%18 = arith.subi %16, %17 : i32 | |
%19 = arith.addi %18, %in_6 : i32 | |
linalg.yield %19 : i32 | |
} -> tensor<256x256xi32> | |
%14 = hal.tensor.export %13 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
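// Note on the two reduction generics: %9 reads input0 through (d0, d1) -> (d0, d1) and
// reduces over d1, giving per-row sums of the lhs; %10 reads input1 through the transposed
// map (d0, d1) -> (d1, d0) and also reduces over d1, giving per-column sums of the rhs.
// In %13 each is scaled by the opposite operand's zero point (%arg3 and %arg2, respectively),
// which is exactly the cross-term cancellation in the expansion sketched earlier.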
// -----// IR Dump After BubbleUpExpandShapesPass (iree-dispatch-creation-bubble-up-expand-shapes) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%pack_0 = tensor.pack %1 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%4 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack_1 = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %4 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
%5 = linalg.mmt4d ins(%pack, %pack_0 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%pack_1 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
%6 = tensor.empty() : tensor<256x256xi32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %6 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%7 = tensor.empty() : tensor<256xi32> | |
%8 = linalg.fill ins(%c0_i32 : i32) outs(%7 : tensor<256xi32>) -> tensor<256xi32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%11 = arith.muli %arg2, %arg3 : i32 | |
%12 = arith.muli %11, %c256_i32 : i32 | |
%13 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%unpack, %9, %arg3, %10, %arg2, %12 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%6 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_2: i32, %in_3: i32, %in_4: i32, %in_5: i32, %in_6: i32, %out: i32): | |
%15 = arith.muli %in_2, %in_3 : i32 | |
%16 = arith.subi %in, %15 : i32 | |
%17 = arith.muli %in_4, %in_5 : i32 | |
%18 = arith.subi %16, %17 : i32 | |
%19 = arith.addi %18, %in_6 : i32 | |
linalg.yield %19 : i32 | |
} -> tensor<256x256xi32> | |
%14 = hal.tensor.export %13 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
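// Note: this function contains no tensor.expand_shape / tensor.collapse_shape or
// tensor.extract_slice ops, so bubble-up-expand-shapes, bubble-up-extract-slices, and the
// later sink-reshapes pass all have nothing to move, as the following identical dumps show.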
// -----// IR Dump After BubbleUpExtractSlicesPass (iree-dispatch-creation-bubble-up-extract-slices) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%pack_0 = tensor.pack %1 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%4 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack_1 = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %4 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
%5 = linalg.mmt4d ins(%pack, %pack_0 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%pack_1 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
%6 = tensor.empty() : tensor<256x256xi32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %6 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%7 = tensor.empty() : tensor<256xi32> | |
%8 = linalg.fill ins(%c0_i32 : i32) outs(%7 : tensor<256xi32>) -> tensor<256xi32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%11 = arith.muli %arg2, %arg3 : i32 | |
%12 = arith.muli %11, %c256_i32 : i32 | |
%13 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%unpack, %9, %arg3, %10, %arg2, %12 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%6 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_2: i32, %in_3: i32, %in_4: i32, %in_5: i32, %in_6: i32, %out: i32): | |
%15 = arith.muli %in_2, %in_3 : i32 | |
%16 = arith.subi %in, %15 : i32 | |
%17 = arith.muli %in_4, %in_5 : i32 | |
%18 = arith.subi %16, %17 : i32 | |
%19 = arith.addi %18, %in_6 : i32 | |
linalg.yield %19 : i32 | |
} -> tensor<256x256xi32> | |
%14 = hal.tensor.export %13 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%pack_0 = tensor.pack %1 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%4 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack_1 = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %4 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
%5 = linalg.mmt4d ins(%pack, %pack_0 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%pack_1 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
%6 = tensor.empty() : tensor<256x256xi32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %6 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%7 = tensor.empty() : tensor<256xi32> | |
%8 = linalg.fill ins(%c0_i32 : i32) outs(%7 : tensor<256xi32>) -> tensor<256xi32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%11 = arith.muli %arg2, %arg3 : i32 | |
%12 = arith.muli %11, %c256_i32 : i32 | |
%13 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%unpack, %9, %arg3, %10, %arg2, %12 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%6 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_2: i32, %in_3: i32, %in_4: i32, %in_5: i32, %in_6: i32, %out: i32): | |
%15 = arith.muli %in_2, %in_3 : i32 | |
%16 = arith.subi %in, %15 : i32 | |
%17 = arith.muli %in_4, %in_5 : i32 | |
%18 = arith.subi %16, %17 : i32 | |
%19 = arith.addi %18, %in_6 : i32 | |
linalg.yield %19 : i32 | |
} -> tensor<256x256xi32> | |
%14 = hal.tensor.export %13 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%pack_0 = tensor.pack %1 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%4 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack_1 = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %4 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
%5 = linalg.mmt4d ins(%pack, %pack_0 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%pack_1 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
%6 = tensor.empty() : tensor<256x256xi32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %6 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%7 = tensor.empty() : tensor<256xi32> | |
%8 = linalg.fill ins(%c0_i32 : i32) outs(%7 : tensor<256xi32>) -> tensor<256xi32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%11 = arith.muli %arg2, %arg3 : i32 | |
%12 = arith.muli %11, %c256_i32 : i32 | |
%13 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%unpack, %9, %arg3, %10, %arg2, %12 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%6 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_2: i32, %in_3: i32, %in_4: i32, %in_5: i32, %in_6: i32, %out: i32): | |
%15 = arith.muli %in_2, %in_3 : i32 | |
%16 = arith.subi %in, %15 : i32 | |
%17 = arith.muli %in_4, %in_5 : i32 | |
%18 = arith.subi %16, %17 : i32 | |
%19 = arith.addi %18, %in_6 : i32 | |
linalg.yield %19 : i32 | |
} -> tensor<256x256xi32> | |
%14 = hal.tensor.export %13 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
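// Note: %11 and %12 precompute the constant correction term K * zpA * zpB outside the
// generic; with example zero points of 7 and -3 this would be 7 * (-3) * 256 = -5376,
// added back once per output element as %in_6 inside %13.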
// -----// IR Dump After ElementwiseOpFusionPass (iree-dispatch-creation-elementwise-op-fusion) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%pack_0 = tensor.pack %1 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%4 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack_1 = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %4 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
%5 = linalg.mmt4d ins(%pack, %pack_0 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%pack_1 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
%6 = tensor.empty() : tensor<256x256xi32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %6 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%7 = tensor.empty() : tensor<256xi32> | |
%8 = linalg.fill ins(%c0_i32 : i32) outs(%7 : tensor<256xi32>) -> tensor<256xi32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%11 = arith.muli %arg2, %arg3 : i32 | |
%12 = arith.muli %11, %c256_i32 : i32 | |
%13 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%unpack, %9, %arg3, %10, %arg2, %12 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%6 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_2: i32, %in_3: i32, %in_4: i32, %in_5: i32, %in_6: i32, %out: i32): | |
%15 = arith.muli %in_2, %in_3 : i32 | |
%16 = arith.subi %in, %15 : i32 | |
%17 = arith.muli %in_4, %in_5 : i32 | |
%18 = arith.subi %16, %17 : i32 | |
%19 = arith.addi %18, %in_6 : i32 | |
linalg.yield %19 : i32 | |
} -> tensor<256x256xi32> | |
%14 = hal.tensor.export %13 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%pack_0 = tensor.pack %1 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%4 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack_1 = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %4 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
%5 = linalg.mmt4d ins(%pack, %pack_0 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%pack_1 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
%6 = tensor.empty() : tensor<256x256xi32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %6 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%7 = tensor.empty() : tensor<256xi32> | |
%8 = linalg.fill ins(%c0_i32 : i32) outs(%7 : tensor<256xi32>) -> tensor<256xi32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%11 = arith.muli %arg2, %arg3 : i32 | |
%12 = arith.muli %11, %c256_i32 : i32 | |
%13 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%unpack, %9, %arg3, %10, %arg2, %12 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%6 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_2: i32, %in_3: i32, %in_4: i32, %in_5: i32, %in_6: i32, %out: i32): | |
%15 = arith.muli %in_2, %in_3 : i32 | |
%16 = arith.subi %in, %15 : i32 | |
%17 = arith.muli %in_4, %in_5 : i32 | |
%18 = arith.subi %16, %17 : i32 | |
%19 = arith.addi %18, %in_6 : i32 | |
linalg.yield %19 : i32 | |
} -> tensor<256x256xi32> | |
%14 = hal.tensor.export %13 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%pack_0 = tensor.pack %1 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%4 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack_1 = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %4 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
%5 = linalg.mmt4d ins(%pack, %pack_0 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%pack_1 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
%6 = tensor.empty() : tensor<256x256xi32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %6 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%7 = tensor.empty() : tensor<256xi32> | |
%8 = linalg.fill ins(%c0_i32 : i32) outs(%7 : tensor<256xi32>) -> tensor<256xi32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%11 = arith.muli %arg2, %arg3 : i32 | |
%12 = arith.muli %11, %c256_i32 : i32 | |
%13 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%unpack, %9, %arg3, %10, %arg2, %12 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%6 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_2: i32, %in_3: i32, %in_4: i32, %in_5: i32, %in_6: i32, %out: i32): | |
%15 = arith.muli %in_2, %in_3 : i32 | |
%16 = arith.subi %in, %15 : i32 | |
%17 = arith.muli %in_4, %in_5 : i32 | |
%18 = arith.subi %16, %17 : i32 | |
%19 = arith.addi %18, %in_6 : i32 | |
linalg.yield %19 : i32 | |
} -> tensor<256x256xi32> | |
%14 = hal.tensor.export %13 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
// -----// IR Dump After SinkReshapesPass (iree-dispatch-creation-sink-reshapes) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%pack_0 = tensor.pack %1 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%4 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack_1 = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %4 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
%5 = linalg.mmt4d ins(%pack, %pack_0 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%pack_1 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
%6 = tensor.empty() : tensor<256x256xi32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %6 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%7 = tensor.empty() : tensor<256xi32> | |
%8 = linalg.fill ins(%c0_i32 : i32) outs(%7 : tensor<256xi32>) -> tensor<256xi32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%11 = arith.muli %arg2, %arg3 : i32 | |
%12 = arith.muli %11, %c256_i32 : i32 | |
%13 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%unpack, %9, %arg3, %10, %arg2, %12 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%6 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_2: i32, %in_3: i32, %in_4: i32, %in_5: i32, %in_6: i32, %out: i32): | |
%15 = arith.muli %in_2, %in_3 : i32 | |
%16 = arith.subi %in, %15 : i32 | |
%17 = arith.muli %in_4, %in_5 : i32 | |
%18 = arith.subi %16, %17 : i32 | |
%19 = arith.addi %18, %in_6 : i32 | |
linalg.yield %19 : i32 | |
} -> tensor<256x256xi32> | |
%14 = hal.tensor.export %13 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
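// Note: through the remaining canonicalize / CSE rounds below, the function stays in this
// exact form; the pack, mmt4d, and unpack ops are still at the graph level here and are
// only grouped into dispatch regions by later steps of the dispatch-creation pipeline.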
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%pack_0 = tensor.pack %1 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%4 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack_1 = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %4 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
%5 = linalg.mmt4d ins(%pack, %pack_0 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%pack_1 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
%6 = tensor.empty() : tensor<256x256xi32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %6 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%7 = tensor.empty() : tensor<256xi32> | |
%8 = linalg.fill ins(%c0_i32 : i32) outs(%7 : tensor<256xi32>) -> tensor<256xi32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%11 = arith.muli %arg2, %arg3 : i32 | |
%12 = arith.muli %11, %c256_i32 : i32 | |
%13 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%unpack, %9, %arg3, %10, %arg2, %12 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%6 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_2: i32, %in_3: i32, %in_4: i32, %in_5: i32, %in_6: i32, %out: i32): | |
%15 = arith.muli %in_2, %in_3 : i32 | |
%16 = arith.subi %in, %15 : i32 | |
%17 = arith.muli %in_4, %in_5 : i32 | |
%18 = arith.subi %16, %17 : i32 | |
%19 = arith.addi %18, %in_6 : i32 | |
linalg.yield %19 : i32 | |
} -> tensor<256x256xi32> | |
%14 = hal.tensor.export %13 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%pack_0 = tensor.pack %1 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%4 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack_1 = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %4 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
%5 = linalg.mmt4d ins(%pack, %pack_0 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%pack_1 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
%6 = tensor.empty() : tensor<256x256xi32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %6 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%7 = tensor.empty() : tensor<256xi32> | |
%8 = linalg.fill ins(%c0_i32 : i32) outs(%7 : tensor<256xi32>) -> tensor<256xi32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%11 = arith.muli %arg2, %arg3 : i32 | |
%12 = arith.muli %11, %c256_i32 : i32 | |
%13 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%unpack, %9, %arg3, %10, %arg2, %12 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%6 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_2: i32, %in_3: i32, %in_4: i32, %in_5: i32, %in_6: i32, %out: i32): | |
%15 = arith.muli %in_2, %in_3 : i32 | |
%16 = arith.subi %in, %15 : i32 | |
%17 = arith.muli %in_4, %in_5 : i32 | |
%18 = arith.subi %16, %17 : i32 | |
%19 = arith.addi %18, %in_6 : i32 | |
linalg.yield %19 : i32 | |
} -> tensor<256x256xi32> | |
%14 = hal.tensor.export %13 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
// -----// IR Dump After FuseMultiUseElementwiseProducerPass (iree-dispatch-creation-fuse-multi-use-elementwise-producer) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%pack_0 = tensor.pack %1 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%4 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack_1 = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %4 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
%5 = linalg.mmt4d ins(%pack, %pack_0 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%pack_1 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
%6 = tensor.empty() : tensor<256x256xi32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %6 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%7 = tensor.empty() : tensor<256xi32> | |
%8 = linalg.fill ins(%c0_i32 : i32) outs(%7 : tensor<256xi32>) -> tensor<256xi32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%11 = arith.muli %arg2, %arg3 : i32 | |
%12 = arith.muli %11, %c256_i32 : i32 | |
%13 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%unpack, %9, %arg3, %10, %arg2, %12 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%6 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_2: i32, %in_3: i32, %in_4: i32, %in_5: i32, %in_6: i32, %out: i32): | |
%15 = arith.muli %in_2, %in_3 : i32 | |
%16 = arith.subi %in, %15 : i32 | |
%17 = arith.muli %in_4, %in_5 : i32 | |
%18 = arith.subi %16, %17 : i32 | |
%19 = arith.addi %18, %in_6 : i32 | |
linalg.yield %19 : i32 | |
} -> tensor<256x256xi32> | |
%14 = hal.tensor.export %13 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%pack_0 = tensor.pack %1 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%4 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack_1 = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %4 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
%5 = linalg.mmt4d ins(%pack, %pack_0 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%pack_1 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
%6 = tensor.empty() : tensor<256x256xi32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %6 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%7 = tensor.empty() : tensor<256xi32> | |
%8 = linalg.fill ins(%c0_i32 : i32) outs(%7 : tensor<256xi32>) -> tensor<256xi32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%11 = arith.muli %arg2, %arg3 : i32 | |
%12 = arith.muli %11, %c256_i32 : i32 | |
%13 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%unpack, %9, %arg3, %10, %arg2, %12 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%6 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_2: i32, %in_3: i32, %in_4: i32, %in_5: i32, %in_6: i32, %out: i32): | |
%15 = arith.muli %in_2, %in_3 : i32 | |
%16 = arith.subi %in, %15 : i32 | |
%17 = arith.muli %in_4, %in_5 : i32 | |
%18 = arith.subi %16, %17 : i32 | |
%19 = arith.addi %18, %in_6 : i32 | |
linalg.yield %19 : i32 | |
} -> tensor<256x256xi32> | |
%14 = hal.tensor.export %13 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%pack_0 = tensor.pack %1 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%4 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack_1 = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %4 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
%5 = linalg.mmt4d ins(%pack, %pack_0 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%pack_1 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
%6 = tensor.empty() : tensor<256x256xi32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %6 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%7 = tensor.empty() : tensor<256xi32> | |
%8 = linalg.fill ins(%c0_i32 : i32) outs(%7 : tensor<256xi32>) -> tensor<256xi32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%11 = arith.muli %arg2, %arg3 : i32 | |
%12 = arith.muli %11, %c256_i32 : i32 | |
%13 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%unpack, %9, %arg3, %10, %arg2, %12 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%6 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_2: i32, %in_3: i32, %in_4: i32, %in_5: i32, %in_6: i32, %out: i32): | |
%15 = arith.muli %in_2, %in_3 : i32 | |
%16 = arith.subi %in, %15 : i32 | |
%17 = arith.muli %in_4, %in_5 : i32 | |
%18 = arith.subi %16, %17 : i32 | |
%19 = arith.addi %18, %in_6 : i32 | |
linalg.yield %19 : i32 | |
} -> tensor<256x256xi32> | |
%14 = hal.tensor.export %13 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
// -----// IR Dump After SplitReductionPass (iree-dispatch-creation-split-reduction-ops) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%pack_0 = tensor.pack %1 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%4 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack_1 = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %4 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
%5 = linalg.mmt4d ins(%pack, %pack_0 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%pack_1 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
%6 = tensor.empty() : tensor<256x256xi32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %6 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%7 = tensor.empty() : tensor<256xi32> | |
%8 = linalg.fill ins(%c0_i32 : i32) outs(%7 : tensor<256xi32>) -> tensor<256xi32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%11 = arith.muli %arg2, %arg3 : i32 | |
%12 = arith.muli %11, %c256_i32 : i32 | |
%13 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%unpack, %9, %arg3, %10, %arg2, %12 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%6 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_2: i32, %in_3: i32, %in_4: i32, %in_5: i32, %in_6: i32, %out: i32): | |
%15 = arith.muli %in_2, %in_3 : i32 | |
%16 = arith.subi %in, %15 : i32 | |
%17 = arith.muli %in_4, %in_5 : i32 | |
%18 = arith.subi %16, %17 : i32 | |
%19 = arith.addi %18, %in_6 : i32 | |
linalg.yield %19 : i32 | |
} -> tensor<256x256xi32> | |
%14 = hal.tensor.export %13 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
// -----// IR Dump After TransposeGenericOpsPass (iree-dispatch-creation-transpose-generic-ops) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%pack_0 = tensor.pack %1 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%4 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack_1 = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %4 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
%5 = linalg.mmt4d ins(%pack, %pack_0 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%pack_1 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
%6 = tensor.empty() : tensor<256x256xi32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %6 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%7 = tensor.empty() : tensor<256xi32> | |
%8 = linalg.fill ins(%c0_i32 : i32) outs(%7 : tensor<256xi32>) -> tensor<256xi32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%11 = arith.muli %arg2, %arg3 : i32 | |
%12 = arith.muli %11, %c256_i32 : i32 | |
%13 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%unpack, %9, %arg3, %10, %arg2, %12 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%6 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_2: i32, %in_3: i32, %in_4: i32, %in_5: i32, %in_6: i32, %out: i32): | |
%15 = arith.muli %in_2, %in_3 : i32 | |
%16 = arith.subi %in, %15 : i32 | |
%17 = arith.muli %in_4, %in_5 : i32 | |
%18 = arith.subi %16, %17 : i32 | |
%19 = arith.addi %18, %in_6 : i32 | |
linalg.yield %19 : i32 | |
} -> tensor<256x256xi32> | |
%14 = hal.tensor.export %13 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
// -----// IR Dump After FormScalarDispatchesPass (iree-dispatch-creation-form-scalar-dispatches) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%pack_0 = tensor.pack %1 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
%4 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack_1 = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %4 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
%5 = linalg.mmt4d ins(%pack, %pack_0 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%pack_1 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
%6 = tensor.empty() : tensor<256x256xi32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %6 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%7 = tensor.empty() : tensor<256xi32> | |
%8 = linalg.fill ins(%c0_i32 : i32) outs(%7 : tensor<256xi32>) -> tensor<256xi32> | |
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%8 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
%11 = arith.muli %arg2, %arg3 : i32 | |
%12 = arith.muli %11, %c256_i32 : i32 | |
%13 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%unpack, %9, %arg3, %10, %arg2, %12 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%6 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_2: i32, %in_3: i32, %in_4: i32, %in_5: i32, %in_6: i32, %out: i32): | |
%15 = arith.muli %in_2, %in_3 : i32 | |
%16 = arith.subi %in, %15 : i32 | |
%17 = arith.muli %in_4, %in_5 : i32 | |
%18 = arith.subi %16, %17 : i32 | |
%19 = arith.addi %18, %in_6 : i32 | |
linalg.yield %19 : i32 | |
} -> tensor<256x256xi32> | |
%14 = hal.tensor.export %13 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
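// ---- Reader note (added for readability; not compiler output) ----
// The dumps for CSE, FuseMultiUseElementwiseProducer, iree-flow-canonicalize, the second
// CSE, SplitReduction, TransposeGenericOps, and FormScalarDispatches above are identical:
// none of these passes changes this function. The first structural change appears in the
// FormDispatchRegionsPass dump that follows.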
// -----// IR Dump After FormDispatchRegionsPass (iree-dispatch-creation-form-dispatch-regions) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%4 = flow.dispatch.region -> (tensor<16x128x16x2xi8>) { | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.return %pack : tensor<16x128x16x2xi8> | |
} | |
%5 = flow.dispatch.region -> (tensor<16x128x16x2xi8>) { | |
%pack = tensor.pack %1 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.return %pack : tensor<16x128x16x2xi8> | |
} | |
%6 = tensor.empty() : tensor<16x16x16x16xi32> | |
%7 = flow.dispatch.region -> (tensor<16x16x16x16xi32>) { | |
%pack = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %6 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
flow.return %pack : tensor<16x16x16x16xi32> | |
} | |
%8 = flow.dispatch.region -> (tensor<16x16x16x16xi32>) { | |
%18 = linalg.mmt4d ins(%4, %5 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%7 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
flow.return %18 : tensor<16x16x16x16xi32> | |
} | |
%9 = tensor.empty() : tensor<256x256xi32> | |
%10 = tensor.empty() : tensor<256xi32> | |
%11 = linalg.fill ins(%c0_i32 : i32) outs(%10 : tensor<256xi32>) -> tensor<256xi32> | |
%12 = flow.dispatch.region -> (tensor<256xi32>) { | |
%18 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%11 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%19 = arith.extsi %in : i8 to i32 | |
%20 = arith.addi %19, %out : i32 | |
linalg.yield %20 : i32 | |
} -> tensor<256xi32> | |
flow.return %18 : tensor<256xi32> | |
} | |
%13 = flow.dispatch.region -> (tensor<256xi32>) { | |
%18 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%11 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%19 = arith.extsi %in : i8 to i32 | |
%20 = arith.addi %19, %out : i32 | |
linalg.yield %20 : i32 | |
} -> tensor<256xi32> | |
flow.return %18 : tensor<256xi32> | |
} | |
%14 = arith.muli %arg2, %arg3 : i32 | |
%15 = arith.muli %14, %c256_i32 : i32 | |
%16 = flow.dispatch.region -> (tensor<256x256xi32>) { | |
%unpack = tensor.unpack %8 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %9 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%18 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%unpack, %12, %arg3, %13, %arg2, %15 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%9 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_0: i32, %in_1: i32, %in_2: i32, %in_3: i32, %in_4: i32, %out: i32): | |
%19 = arith.muli %in_0, %in_1 : i32 | |
%20 = arith.subi %in, %19 : i32 | |
%21 = arith.muli %in_2, %in_3 : i32 | |
%22 = arith.subi %20, %21 : i32 | |
%23 = arith.addi %22, %in_4 : i32 | |
linalg.yield %23 : i32 | |
} -> tensor<256x256xi32> | |
flow.return %18 : tensor<256x256xi32> | |
} | |
%17 = hal.tensor.export %16 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %17 : !hal.buffer_view | |
} | |
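// ---- Reader note (added for readability; not compiler output) ----
// FormDispatchRegionsPass wraps each root op in its own flow.dispatch.region: one region per
// tensor.pack (%4, %5, %7), one for the linalg.mmt4d (%8), one per row/column-sum reduction
// (%12, %13), and one that fuses the tensor.unpack with the final elementwise correction
// generic (%16). The tensor.empty / linalg.fill initializers, the arith constants, and the
// scalar zero-point arithmetic still live outside the regions at this point.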
// -----// IR Dump After CloneProducersIntoDispatchRegionsPass (iree-dispatch-creation-clone-producers-into-dispatch-regions) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = flow.dispatch.region -> (tensor<16x128x16x2xi8>) { | |
%13 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %13 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.return %pack : tensor<16x128x16x2xi8> | |
} | |
%4 = flow.dispatch.region -> (tensor<16x128x16x2xi8>) { | |
%13 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %1 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %13 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.return %pack : tensor<16x128x16x2xi8> | |
} | |
%5 = flow.dispatch.region -> (tensor<16x16x16x16xi32>) { | |
%13 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %13 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
flow.return %pack : tensor<16x16x16x16xi32> | |
} | |
%6 = flow.dispatch.region -> (tensor<16x16x16x16xi32>) { | |
%13 = linalg.mmt4d ins(%3, %4 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%5 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
flow.return %13 : tensor<16x16x16x16xi32> | |
} | |
%7 = tensor.empty() : tensor<256xi32> | |
%8 = flow.dispatch.region -> (tensor<256xi32>) { | |
%13 = tensor.empty() : tensor<256xi32> | |
%c0_i32_0 = arith.constant 0 : i32 | |
%14 = linalg.fill ins(%c0_i32_0 : i32) outs(%13 : tensor<256xi32>) -> tensor<256xi32> | |
%15 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%14 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%16 = arith.extsi %in : i8 to i32 | |
%17 = arith.addi %16, %out : i32 | |
linalg.yield %17 : i32 | |
} -> tensor<256xi32> | |
flow.return %15 : tensor<256xi32> | |
} | |
%9 = flow.dispatch.region -> (tensor<256xi32>) { | |
%13 = tensor.empty() : tensor<256xi32> | |
%c0_i32_0 = arith.constant 0 : i32 | |
%14 = linalg.fill ins(%c0_i32_0 : i32) outs(%13 : tensor<256xi32>) -> tensor<256xi32> | |
%15 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%14 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%16 = arith.extsi %in : i8 to i32 | |
%17 = arith.addi %16, %out : i32 | |
linalg.yield %17 : i32 | |
} -> tensor<256xi32> | |
flow.return %15 : tensor<256xi32> | |
} | |
%10 = arith.muli %arg2, %arg3 : i32 | |
%11 = flow.dispatch.region -> (tensor<256x256xi32>) { | |
%c256_i32_0 = arith.constant 256 : i32 | |
%13 = arith.muli %arg2, %arg3 : i32 | |
%14 = tensor.empty() : tensor<256x256xi32> | |
%15 = arith.muli %13, %c256_i32_0 : i32 | |
%unpack = tensor.unpack %6 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %14 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%16 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%unpack, %8, %arg3, %9, %arg2, %15 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%14 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_1: i32, %in_2: i32, %in_3: i32, %in_4: i32, %in_5: i32, %out: i32): | |
%17 = arith.muli %in_1, %in_2 : i32 | |
%18 = arith.subi %in, %17 : i32 | |
%19 = arith.muli %in_3, %in_4 : i32 | |
%20 = arith.subi %18, %19 : i32 | |
%21 = arith.addi %20, %in_5 : i32 | |
linalg.yield %21 : i32 | |
} -> tensor<256x256xi32> | |
flow.return %16 : tensor<256x256xi32> | |
} | |
%12 = hal.tensor.export %11 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %12 : !hal.buffer_view | |
} | |
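// ---- Reader note (added for readability; not compiler output) ----
// CloneProducersIntoDispatchRegionsPass copies the producers each region needs into the
// region body: the tensor.empty destinations move into the pack regions, the i32 0 constant,
// tensor.empty, and linalg.fill are duplicated inside both reduction regions, and the final
// region recomputes 256 * %arg2 * %arg3 locally. The top-level %7 = tensor.empty and
// %10 = arith.muli are now unused and disappear in the later dumps below.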
// -----// IR Dump After CollapseDimensionsPass (iree-dispatch-creation-collapse-dimensions) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = flow.dispatch.region -> (tensor<16x128x16x2xi8>) { | |
%13 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %13 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.return %pack : tensor<16x128x16x2xi8> | |
} | |
%4 = flow.dispatch.region -> (tensor<16x128x16x2xi8>) { | |
%13 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %1 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %13 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.return %pack : tensor<16x128x16x2xi8> | |
} | |
%5 = flow.dispatch.region -> (tensor<16x16x16x16xi32>) { | |
%13 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %13 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
flow.return %pack : tensor<16x16x16x16xi32> | |
} | |
%6 = flow.dispatch.region -> (tensor<16x16x16x16xi32>) { | |
%13 = linalg.mmt4d ins(%3, %4 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%5 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
flow.return %13 : tensor<16x16x16x16xi32> | |
} | |
%7 = tensor.empty() : tensor<256xi32> | |
%8 = flow.dispatch.region -> (tensor<256xi32>) { | |
%13 = tensor.empty() : tensor<256xi32> | |
%c0_i32_0 = arith.constant 0 : i32 | |
%14 = linalg.fill ins(%c0_i32_0 : i32) outs(%13 : tensor<256xi32>) -> tensor<256xi32> | |
%15 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%14 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%16 = arith.extsi %in : i8 to i32 | |
%17 = arith.addi %16, %out : i32 | |
linalg.yield %17 : i32 | |
} -> tensor<256xi32> | |
flow.return %15 : tensor<256xi32> | |
} | |
%9 = flow.dispatch.region -> (tensor<256xi32>) { | |
%13 = tensor.empty() : tensor<256xi32> | |
%c0_i32_0 = arith.constant 0 : i32 | |
%14 = linalg.fill ins(%c0_i32_0 : i32) outs(%13 : tensor<256xi32>) -> tensor<256xi32> | |
%15 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%1 : tensor<256x256xi8>) outs(%14 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%16 = arith.extsi %in : i8 to i32 | |
%17 = arith.addi %16, %out : i32 | |
linalg.yield %17 : i32 | |
} -> tensor<256xi32> | |
flow.return %15 : tensor<256xi32> | |
} | |
%10 = arith.muli %arg2, %arg3 : i32 | |
%11 = flow.dispatch.region -> (tensor<256x256xi32>) { | |
%c256_i32_0 = arith.constant 256 : i32 | |
%13 = arith.muli %arg2, %arg3 : i32 | |
%14 = tensor.empty() : tensor<256x256xi32> | |
%15 = arith.muli %13, %c256_i32_0 : i32 | |
%unpack = tensor.unpack %6 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %14 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%16 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%unpack, %8, %arg3, %9, %arg2, %15 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%14 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_1: i32, %in_2: i32, %in_3: i32, %in_4: i32, %in_5: i32, %out: i32): | |
%17 = arith.muli %in_1, %in_2 : i32 | |
%18 = arith.subi %in, %17 : i32 | |
%19 = arith.muli %in_3, %in_4 : i32 | |
%20 = arith.subi %18, %19 : i32 | |
%21 = arith.addi %20, %in_5 : i32 | |
linalg.yield %21 : i32 | |
} -> tensor<256x256xi32> | |
flow.return %16 : tensor<256x256xi32> | |
} | |
%12 = hal.tensor.export %11 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %12 : !hal.buffer_view | |
} | |
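// ---- Reader note (added for readability; not compiler output) ----
// CollapseDimensionsPass finds nothing to collapse here; this dump is identical to the
// CloneProducersIntoDispatchRegionsPass dump above.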
// -----// IR Dump After ConvertDispatchRegionsToWorkgroupsPass (iree-dispatch-creation-convert-dispatch-regions-to-workgroups) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = flow.dispatch.workgroups(%0) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> = | |
(%arg5: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>>) { | |
%13 = flow.dispatch.tensor.load %arg5, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%14 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %13 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %14 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %arg6, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
flow.return | |
} | |
%4 = flow.dispatch.workgroups(%1) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> = | |
(%arg5: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>>) { | |
%13 = flow.dispatch.tensor.load %arg5, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%14 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %13 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %14 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %arg6, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
flow.return | |
} | |
%5 = flow.dispatch.workgroups(%2) : (tensor<256x256xi32>) -> tensor<16x16x16x16xi32> = | |
(%arg5: !flow.dispatch.tensor<readonly:tensor<256x256xi32>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>>) { | |
%13 = flow.dispatch.tensor.load %arg5, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi32>> -> tensor<256x256xi32> | |
%14 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack = tensor.pack %13 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %14 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %pack, %arg6, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>> | |
flow.return | |
} | |
%6 = flow.dispatch.workgroups(%3, %4, %5) : (tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>, tensor<16x16x16x16xi32>) -> %5 = | |
(%arg5: !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>>, %arg6: !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>>, %arg7: !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>>) { | |
%13 = flow.dispatch.tensor.load %arg5, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%14 = flow.dispatch.tensor.load %arg6, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%15 = flow.dispatch.tensor.load %arg7, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%16 = linalg.mmt4d ins(%13, %14 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%15 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %16, %arg7, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> | |
flow.return | |
} | |
%7 = tensor.empty() : tensor<256xi32> | |
%8 = flow.dispatch.workgroups(%0) : (tensor<256x256xi8>) -> tensor<256xi32> = | |
(%arg5: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<256xi32>>) { | |
%13 = flow.dispatch.tensor.load %arg5, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%14 = tensor.empty() : tensor<256xi32> | |
%c0_i32_0 = arith.constant 0 : i32 | |
%15 = linalg.fill ins(%c0_i32_0 : i32) outs(%14 : tensor<256xi32>) -> tensor<256xi32> | |
%16 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%13 : tensor<256x256xi8>) outs(%15 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%17 = arith.extsi %in : i8 to i32 | |
%18 = arith.addi %17, %out : i32 | |
linalg.yield %18 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %16, %arg6, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
flow.return | |
} | |
%9 = flow.dispatch.workgroups(%1) : (tensor<256x256xi8>) -> tensor<256xi32> = | |
(%arg5: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<256xi32>>) { | |
%13 = flow.dispatch.tensor.load %arg5, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%14 = tensor.empty() : tensor<256xi32> | |
%c0_i32_0 = arith.constant 0 : i32 | |
%15 = linalg.fill ins(%c0_i32_0 : i32) outs(%14 : tensor<256xi32>) -> tensor<256xi32> | |
%16 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%13 : tensor<256x256xi8>) outs(%15 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%17 = arith.extsi %in : i8 to i32 | |
%18 = arith.addi %17, %out : i32 | |
linalg.yield %18 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %16, %arg6, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
flow.return | |
} | |
%10 = arith.muli %arg2, %arg3 : i32 | |
%11 = flow.dispatch.workgroups(%arg2, %arg3, %6, %8, %9) : (i32, i32, tensor<16x16x16x16xi32>, tensor<256xi32>, tensor<256xi32>) -> tensor<256x256xi32> = | |
(%arg5: i32, %arg6: i32, %arg7: !flow.dispatch.tensor<readonly:tensor<16x16x16x16xi32>>, %arg8: !flow.dispatch.tensor<readonly:tensor<256xi32>>, %arg9: !flow.dispatch.tensor<readonly:tensor<256xi32>>, %arg10: !flow.dispatch.tensor<writeonly:tensor<256x256xi32>>) { | |
%13 = flow.dispatch.tensor.load %arg7, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%14 = flow.dispatch.tensor.load %arg8, offsets = [0], sizes = [256], strides = [1] : !flow.dispatch.tensor<readonly:tensor<256xi32>> -> tensor<256xi32> | |
%15 = flow.dispatch.tensor.load %arg9, offsets = [0], sizes = [256], strides = [1] : !flow.dispatch.tensor<readonly:tensor<256xi32>> -> tensor<256xi32> | |
%c256_i32_0 = arith.constant 256 : i32 | |
%16 = arith.muli %arg5, %arg6 : i32 | |
%17 = tensor.empty() : tensor<256x256xi32> | |
%18 = arith.muli %16, %c256_i32_0 : i32 | |
%unpack = tensor.unpack %13 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %17 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%19 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%unpack, %14, %arg6, %15, %arg5, %18 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%17 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_1: i32, %in_2: i32, %in_3: i32, %in_4: i32, %in_5: i32, %out: i32): | |
%20 = arith.muli %in_1, %in_2 : i32 | |
%21 = arith.subi %in, %20 : i32 | |
%22 = arith.muli %in_3, %in_4 : i32 | |
%23 = arith.subi %21, %22 : i32 | |
%24 = arith.addi %23, %in_5 : i32 | |
linalg.yield %24 : i32 | |
} -> tensor<256x256xi32> | |
flow.dispatch.tensor.store %19, %arg10, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : tensor<256x256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256x256xi32>> | |
flow.return | |
} | |
%12 = hal.tensor.export %11 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %12 : !hal.buffer_view | |
} | |
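// ---- Reader note (added for readability; not compiler output) ----
// ConvertDispatchRegionsToWorkgroupsPass turns every flow.dispatch.region into a
// flow.dispatch.workgroups op whose block arguments are !flow.dispatch.tensor bindings,
// with explicit flow.dispatch.tensor.load / flow.dispatch.tensor.store for the tensor
// operands. The mmt4d dispatch is declared as `-> %5`, i.e. it updates the packed
// accumulator in place through a readwrite binding, and the i32 zero points are passed to
// the last dispatch by value.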
// -----// IR Dump After ConvertTensorToFlowPass (iree-dispatch-creation-convert-tensor-to-flow) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = flow.dispatch.workgroups(%0) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> = | |
(%arg5: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>>) { | |
%11 = flow.dispatch.tensor.load %arg5, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%12 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %11 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %12 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %arg6, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
flow.return | |
} | |
%4 = flow.dispatch.workgroups(%1) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> = | |
(%arg5: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>>) { | |
%11 = flow.dispatch.tensor.load %arg5, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%12 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %11 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %12 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %arg6, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
flow.return | |
} | |
%5 = flow.dispatch.workgroups(%2) : (tensor<256x256xi32>) -> tensor<16x16x16x16xi32> = | |
(%arg5: !flow.dispatch.tensor<readonly:tensor<256x256xi32>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>>) { | |
%11 = flow.dispatch.tensor.load %arg5, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi32>> -> tensor<256x256xi32> | |
%12 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack = tensor.pack %11 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %12 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %pack, %arg6, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>> | |
flow.return | |
} | |
%6 = flow.dispatch.workgroups(%3, %4, %5) : (tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>, tensor<16x16x16x16xi32>) -> %5 = | |
(%arg5: !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>>, %arg6: !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>>, %arg7: !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>>) { | |
%11 = flow.dispatch.tensor.load %arg5, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%12 = flow.dispatch.tensor.load %arg6, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%13 = flow.dispatch.tensor.load %arg7, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%14 = linalg.mmt4d ins(%11, %12 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%13 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %14, %arg7, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> | |
flow.return | |
} | |
%7 = flow.dispatch.workgroups(%0) : (tensor<256x256xi8>) -> tensor<256xi32> = | |
(%arg5: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<256xi32>>) { | |
%c0_i32 = arith.constant 0 : i32 | |
%11 = flow.dispatch.tensor.load %arg5, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%12 = tensor.empty() : tensor<256xi32> | |
%13 = linalg.fill ins(%c0_i32 : i32) outs(%12 : tensor<256xi32>) -> tensor<256xi32> | |
%14 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%11 : tensor<256x256xi8>) outs(%13 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %14, %arg6, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
flow.return | |
} | |
%8 = flow.dispatch.workgroups(%1) : (tensor<256x256xi8>) -> tensor<256xi32> = | |
(%arg5: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<256xi32>>) { | |
%c0_i32 = arith.constant 0 : i32 | |
%11 = flow.dispatch.tensor.load %arg5, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%12 = tensor.empty() : tensor<256xi32> | |
%13 = linalg.fill ins(%c0_i32 : i32) outs(%12 : tensor<256xi32>) -> tensor<256xi32> | |
%14 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%11 : tensor<256x256xi8>) outs(%13 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %14, %arg6, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
flow.return | |
} | |
%9 = flow.dispatch.workgroups(%arg2, %arg3, %6, %7, %8) : (i32, i32, tensor<16x16x16x16xi32>, tensor<256xi32>, tensor<256xi32>) -> tensor<256x256xi32> = | |
(%arg5: i32, %arg6: i32, %arg7: !flow.dispatch.tensor<readonly:tensor<16x16x16x16xi32>>, %arg8: !flow.dispatch.tensor<readonly:tensor<256xi32>>, %arg9: !flow.dispatch.tensor<readonly:tensor<256xi32>>, %arg10: !flow.dispatch.tensor<writeonly:tensor<256x256xi32>>) { | |
%c256_i32 = arith.constant 256 : i32 | |
%11 = flow.dispatch.tensor.load %arg7, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%12 = flow.dispatch.tensor.load %arg8, offsets = [0], sizes = [256], strides = [1] : !flow.dispatch.tensor<readonly:tensor<256xi32>> -> tensor<256xi32> | |
%13 = flow.dispatch.tensor.load %arg9, offsets = [0], sizes = [256], strides = [1] : !flow.dispatch.tensor<readonly:tensor<256xi32>> -> tensor<256xi32> | |
%14 = arith.muli %arg5, %arg6 : i32 | |
%15 = tensor.empty() : tensor<256x256xi32> | |
%16 = arith.muli %14, %c256_i32 : i32 | |
%unpack = tensor.unpack %11 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %15 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%17 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%unpack, %12, %arg6, %13, %arg5, %16 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%15 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_0: i32, %in_1: i32, %in_2: i32, %in_3: i32, %in_4: i32, %out: i32): | |
%18 = arith.muli %in_0, %in_1 : i32 | |
%19 = arith.subi %in, %18 : i32 | |
%20 = arith.muli %in_2, %in_3 : i32 | |
%21 = arith.subi %19, %20 : i32 | |
%22 = arith.addi %21, %in_4 : i32 | |
linalg.yield %22 : i32 | |
} -> tensor<256x256xi32> | |
flow.dispatch.tensor.store %17, %arg10, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : tensor<256x256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256x256xi32>> | |
flow.return | |
} | |
%10 = hal.tensor.export %9 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
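// ---- Reader note (added for readability; not compiler output) ----
// After ConvertTensorToFlowPass only hal.tensor.import/export and the
// flow.dispatch.workgroups ops remain at the top level of the function; the unused
// tensor.empty, the hoisted i32 constants, and the duplicate arith.muli from the previous
// dump have been removed.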
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = flow.dispatch.workgroups(%0) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> = | |
(%arg5: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>>) { | |
%11 = flow.dispatch.tensor.load %arg5, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%12 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %11 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %12 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %arg6, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
flow.return | |
} | |
%4 = flow.dispatch.workgroups(%1) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> = | |
(%arg5: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>>) { | |
%11 = flow.dispatch.tensor.load %arg5, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%12 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %11 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %12 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %arg6, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
flow.return | |
} | |
%5 = flow.dispatch.workgroups(%2) : (tensor<256x256xi32>) -> tensor<16x16x16x16xi32> = | |
(%arg5: !flow.dispatch.tensor<readonly:tensor<256x256xi32>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>>) { | |
%11 = flow.dispatch.tensor.load %arg5, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi32>> -> tensor<256x256xi32> | |
%12 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack = tensor.pack %11 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %12 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %pack, %arg6, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>> | |
flow.return | |
} | |
%6 = flow.dispatch.workgroups(%3, %4, %5) : (tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>, tensor<16x16x16x16xi32>) -> %5 = | |
(%arg5: !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>>, %arg6: !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>>, %arg7: !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>>) { | |
%11 = flow.dispatch.tensor.load %arg5, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%12 = flow.dispatch.tensor.load %arg6, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%13 = flow.dispatch.tensor.load %arg7, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%14 = linalg.mmt4d ins(%11, %12 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%13 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %14, %arg7, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> | |
flow.return | |
} | |
%7 = flow.dispatch.workgroups(%0) : (tensor<256x256xi8>) -> tensor<256xi32> = | |
(%arg5: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<256xi32>>) { | |
%c0_i32 = arith.constant 0 : i32 | |
%11 = flow.dispatch.tensor.load %arg5, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%12 = tensor.empty() : tensor<256xi32> | |
%13 = linalg.fill ins(%c0_i32 : i32) outs(%12 : tensor<256xi32>) -> tensor<256xi32> | |
%14 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%11 : tensor<256x256xi8>) outs(%13 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %14, %arg6, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
flow.return | |
} | |
%8 = flow.dispatch.workgroups(%1) : (tensor<256x256xi8>) -> tensor<256xi32> = | |
(%arg5: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<256xi32>>) { | |
%c0_i32 = arith.constant 0 : i32 | |
%11 = flow.dispatch.tensor.load %arg5, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%12 = tensor.empty() : tensor<256xi32> | |
%13 = linalg.fill ins(%c0_i32 : i32) outs(%12 : tensor<256xi32>) -> tensor<256xi32> | |
%14 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%11 : tensor<256x256xi8>) outs(%13 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %14, %arg6, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
flow.return | |
} | |
%9 = flow.dispatch.workgroups(%arg2, %arg3, %6, %7, %8) : (i32, i32, tensor<16x16x16x16xi32>, tensor<256xi32>, tensor<256xi32>) -> tensor<256x256xi32> = | |
(%arg5: i32, %arg6: i32, %arg7: !flow.dispatch.tensor<readonly:tensor<16x16x16x16xi32>>, %arg8: !flow.dispatch.tensor<readonly:tensor<256xi32>>, %arg9: !flow.dispatch.tensor<readonly:tensor<256xi32>>, %arg10: !flow.dispatch.tensor<writeonly:tensor<256x256xi32>>) { | |
%c256_i32 = arith.constant 256 : i32 | |
%11 = flow.dispatch.tensor.load %arg7, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%12 = flow.dispatch.tensor.load %arg8, offsets = [0], sizes = [256], strides = [1] : !flow.dispatch.tensor<readonly:tensor<256xi32>> -> tensor<256xi32> | |
%13 = flow.dispatch.tensor.load %arg9, offsets = [0], sizes = [256], strides = [1] : !flow.dispatch.tensor<readonly:tensor<256xi32>> -> tensor<256xi32> | |
%14 = arith.muli %arg5, %arg6 : i32 | |
%15 = tensor.empty() : tensor<256x256xi32> | |
%16 = arith.muli %14, %c256_i32 : i32 | |
%unpack = tensor.unpack %11 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %15 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%17 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%unpack, %12, %arg6, %13, %arg5, %16 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%15 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_0: i32, %in_1: i32, %in_2: i32, %in_3: i32, %in_4: i32, %out: i32): | |
%18 = arith.muli %in_0, %in_1 : i32 | |
%19 = arith.subi %in, %18 : i32 | |
%20 = arith.muli %in_2, %in_3 : i32 | |
%21 = arith.subi %19, %20 : i32 | |
%22 = arith.addi %21, %in_4 : i32 | |
linalg.yield %22 : i32 | |
} -> tensor<256x256xi32> | |
flow.dispatch.tensor.store %17, %arg10, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : tensor<256x256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256x256xi32>> | |
flow.return | |
} | |
%10 = hal.tensor.export %9 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After MaterializeDefaultWorkgroupCountRegionPass (iree-dispatch-creation-materialize-default-workgroup-count-region) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = flow.dispatch.workgroups(%0) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> = | |
(%arg5: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>>) { | |
%11 = flow.dispatch.tensor.load %arg5, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%12 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %11 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %12 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %arg6, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
flow.return | |
} count() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
%4 = flow.dispatch.workgroups(%1) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> = | |
(%arg5: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>>) { | |
%11 = flow.dispatch.tensor.load %arg5, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%12 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %11 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %12 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %arg6, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
flow.return | |
} count() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
%5 = flow.dispatch.workgroups(%2) : (tensor<256x256xi32>) -> tensor<16x16x16x16xi32> = | |
(%arg5: !flow.dispatch.tensor<readonly:tensor<256x256xi32>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>>) { | |
%11 = flow.dispatch.tensor.load %arg5, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi32>> -> tensor<256x256xi32> | |
%12 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack = tensor.pack %11 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %12 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %pack, %arg6, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>> | |
flow.return | |
} count() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
%6 = flow.dispatch.workgroups(%3, %4, %5) : (tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>, tensor<16x16x16x16xi32>) -> %5 = | |
(%arg5: !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>>, %arg6: !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>>, %arg7: !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>>) { | |
%11 = flow.dispatch.tensor.load %arg5, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%12 = flow.dispatch.tensor.load %arg6, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%13 = flow.dispatch.tensor.load %arg7, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%14 = linalg.mmt4d ins(%11, %12 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%13 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %14, %arg7, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> | |
flow.return | |
} count() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
%7 = flow.dispatch.workgroups(%0) : (tensor<256x256xi8>) -> tensor<256xi32> = | |
(%arg5: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<256xi32>>) { | |
%c0_i32 = arith.constant 0 : i32 | |
%11 = flow.dispatch.tensor.load %arg5, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%12 = tensor.empty() : tensor<256xi32> | |
%13 = linalg.fill ins(%c0_i32 : i32) outs(%12 : tensor<256xi32>) -> tensor<256xi32> | |
%14 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%11 : tensor<256x256xi8>) outs(%13 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %14, %arg6, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
flow.return | |
} count() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
%8 = flow.dispatch.workgroups(%1) : (tensor<256x256xi8>) -> tensor<256xi32> = | |
(%arg5: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<256xi32>>) { | |
%c0_i32 = arith.constant 0 : i32 | |
%11 = flow.dispatch.tensor.load %arg5, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%12 = tensor.empty() : tensor<256xi32> | |
%13 = linalg.fill ins(%c0_i32 : i32) outs(%12 : tensor<256xi32>) -> tensor<256xi32> | |
%14 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%11 : tensor<256x256xi8>) outs(%13 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %14, %arg6, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
flow.return | |
} count() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
%9 = flow.dispatch.workgroups(%arg2, %arg3, %6, %7, %8) : (i32, i32, tensor<16x16x16x16xi32>, tensor<256xi32>, tensor<256xi32>) -> tensor<256x256xi32> = | |
(%arg5: i32, %arg6: i32, %arg7: !flow.dispatch.tensor<readonly:tensor<16x16x16x16xi32>>, %arg8: !flow.dispatch.tensor<readonly:tensor<256xi32>>, %arg9: !flow.dispatch.tensor<readonly:tensor<256xi32>>, %arg10: !flow.dispatch.tensor<writeonly:tensor<256x256xi32>>) { | |
%c256_i32 = arith.constant 256 : i32 | |
%11 = flow.dispatch.tensor.load %arg7, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%12 = flow.dispatch.tensor.load %arg8, offsets = [0], sizes = [256], strides = [1] : !flow.dispatch.tensor<readonly:tensor<256xi32>> -> tensor<256xi32> | |
%13 = flow.dispatch.tensor.load %arg9, offsets = [0], sizes = [256], strides = [1] : !flow.dispatch.tensor<readonly:tensor<256xi32>> -> tensor<256xi32> | |
%14 = arith.muli %arg5, %arg6 : i32 | |
%15 = tensor.empty() : tensor<256x256xi32> | |
%16 = arith.muli %14, %c256_i32 : i32 | |
%unpack = tensor.unpack %11 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %15 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%17 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%unpack, %12, %arg6, %13, %arg5, %16 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%15 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_0: i32, %in_1: i32, %in_2: i32, %in_3: i32, %in_4: i32, %out: i32): | |
%18 = arith.muli %in_0, %in_1 : i32 | |
%19 = arith.subi %in, %18 : i32 | |
%20 = arith.muli %in_2, %in_3 : i32 | |
%21 = arith.subi %19, %20 : i32 | |
%22 = arith.addi %21, %in_4 : i32 | |
linalg.yield %22 : i32 | |
} -> tensor<256x256xi32> | |
flow.dispatch.tensor.store %17, %arg10, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : tensor<256x256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256x256xi32>> | |
flow.return | |
} count() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
%10 = hal.tensor.export %9 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
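For reference, the tensor.pack ops in dispatches %3 and %5 above are pure layout changes that prepare the mmt4d operands: 16x2 i8 tiles for the matmul inputs and 16x16 i32 tiles for the accumulator. A rough NumPy equivalent of those two packs, assuming the documented pack semantics where inner_tiles split each source dimension and outer_dims_perm reorders the tiled dimensions (illustrative sketch, not the compiler's implementation; the element type is irrelevant to the layout):

import numpy as np

src = np.arange(256 * 256).reshape(256, 256)

# Dispatch %3: inner_tiles = [16, 2] on dims [0, 1], outer_dims_perm = [0, 1].
# packed[o0, o1, i0, i1] == src[o0 * 16 + i0, o1 * 2 + i1]
lhs_packed = src.reshape(16, 16, 128, 2).transpose(0, 2, 1, 3)    # 16x128x16x2
assert lhs_packed[3, 5, 7, 1] == src[3 * 16 + 7, 5 * 2 + 1]

# Dispatch %5: inner_tiles = [16, 16] on dims [0, 1], outer_dims_perm = [0, 1].
# packed[o0, o1, i0, i1] == src[o0 * 16 + i0, o1 * 16 + i1]
acc_packed = src.reshape(16, 16, 16, 16).transpose(0, 2, 1, 3)    # 16x16x16x16
assert acc_packed[1, 2, 3, 4] == src[1 * 16 + 3, 2 * 16 + 4]

Dispatch %4 packs input1 with outer_dims_perm = [1, 0] and inner_dims_pos = [1, 0], which lands the RHS in the transposed, N-major layout that mmt4d expects.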
// -----// IR Dump After VerifyInputLegalityPass (iree-verify-input-legality) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-amx-fp8,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,-movrs,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,+evex512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-sha512,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,+gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,-nf,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-amx-transpose,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1) -> (d0, d1)> | |
#map1 = affine_map<(d0, d1) -> (d0)> | |
#map2 = affine_map<(d0, d1) -> (d1, d0)> | |
#map3 = affine_map<(d0, d1) -> ()> | |
#map4 = affine_map<(d0, d1) -> (d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = flow.dispatch.workgroups(%0) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> = | |
(%arg5: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>>) { | |
%11 = flow.dispatch.tensor.load %arg5, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%12 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %11 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %12 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %arg6, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
flow.return | |
} count() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
%4 = flow.dispatch.workgroups(%1) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> = | |
(%arg5: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>>) { | |
%11 = flow.dispatch.tensor.load %arg5, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%12 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %11 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %12 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %arg6, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
flow.return | |
} count() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
%5 = flow.dispatch.workgroups(%2) : (tensor<256x256xi32>) -> tensor<16x16x16x16xi32> = | |
(%arg5: !flow.dispatch.tensor<readonly:tensor<256x256xi32>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>>) { | |
%11 = flow.dispatch.tensor.load %arg5, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi32>> -> tensor<256x256xi32> | |
%12 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack = tensor.pack %11 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %12 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %pack, %arg6, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>> | |
flow.return | |
} count() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
%6 = flow.dispatch.workgroups(%3, %4, %5) : (tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>, tensor<16x16x16x16xi32>) -> %5 = | |
(%arg5: !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>>, %arg6: !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>>, %arg7: !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>>) { | |
%11 = flow.dispatch.tensor.load %arg5, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%12 = flow.dispatch.tensor.load %arg6, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%13 = flow.dispatch.tensor.load %arg7, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%14 = linalg.mmt4d ins(%11, %12 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%13 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %14, %arg7, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> | |
flow.return | |
} count() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
%7 = flow.dispatch.workgroups(%0) : (tensor<256x256xi8>) -> tensor<256xi32> = | |
(%arg5: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<256xi32>>) { | |
%c0_i32 = arith.constant 0 : i32 | |
%11 = flow.dispatch.tensor.load %arg5, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%12 = tensor.empty() : tensor<256xi32> | |
%13 = linalg.fill ins(%c0_i32 : i32) outs(%12 : tensor<256xi32>) -> tensor<256xi32> | |
%14 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "reduction"]} ins(%11 : tensor<256x256xi8>) outs(%13 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %14, %arg6, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
flow.return | |
} count() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
%8 = flow.dispatch.workgroups(%1) : (tensor<256x256xi8>) -> tensor<256xi32> = | |
(%arg5: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<256xi32>>) { | |
%c0_i32 = arith.constant 0 : i32 | |
%11 = flow.dispatch.tensor.load %arg5, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%12 = tensor.empty() : tensor<256xi32> | |
%13 = linalg.fill ins(%c0_i32 : i32) outs(%12 : tensor<256xi32>) -> tensor<256xi32> | |
%14 = linalg.generic {indexing_maps = [#map2, #map1], iterator_types = ["parallel", "reduction"]} ins(%11 : tensor<256x256xi8>) outs(%13 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %14, %arg6, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
flow.return | |
} count() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
%9 = flow.dispatch.workgroups(%arg2, %arg3, %6, %7, %8) : (i32, i32, tensor<16x16x16x16xi32>, tensor<256xi32>, tensor<256xi32>) -> tensor<256x256xi32> = | |
(%arg5: i32, %arg6: i32, %arg7: !flow.dispatch.tensor<readonly:tensor<16x16x16x16xi32>>, %arg8: !flow.dispatch.tensor<readonly:tensor<256xi32>>, %arg9: !flow.dispatch.tensor<readonly:tensor<256xi32>>, %arg10: !flow.dispatch.tensor<writeonly:tensor<256x256xi32>>) { | |
%c256_i32 = arith.constant 256 : i32 | |
%11 = flow.dispatch.tensor.load %arg7, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%12 = flow.dispatch.tensor.load %arg8, offsets = [0], sizes = [256], strides = [1] : !flow.dispatch.tensor<readonly:tensor<256xi32>> -> tensor<256xi32> | |
%13 = flow.dispatch.tensor.load %arg9, offsets = [0], sizes = [256], strides = [1] : !flow.dispatch.tensor<readonly:tensor<256xi32>> -> tensor<256xi32> | |
%14 = arith.muli %arg5, %arg6 : i32 | |
%15 = tensor.empty() : tensor<256x256xi32> | |
%16 = arith.muli %14, %c256_i32 : i32 | |
%unpack = tensor.unpack %11 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %15 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%17 = linalg.generic {indexing_maps = [#map, #map1, #map3, #map4, #map3, #map3, #map], iterator_types = ["parallel", "parallel"]} ins(%unpack, %12, %arg6, %13, %arg5, %16 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%15 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_0: i32, %in_1: i32, %in_2: i32, %in_3: i32, %in_4: i32, %out: i32): | |
%18 = arith.muli %in_0, %in_1 : i32 | |
%19 = arith.subi %in, %18 : i32 | |
%20 = arith.muli %in_2, %in_3 : i32 | |
%21 = arith.subi %19, %20 : i32 | |
%22 = arith.addi %21, %in_4 : i32 | |
linalg.yield %22 : i32 | |
} -> tensor<256x256xi32> | |
flow.dispatch.tensor.store %17, %arg10, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : tensor<256x256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256x256xi32>> | |
flow.return | |
} count() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
%10 = hal.tensor.export %9 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
} | |
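The linalg.mmt4d in dispatch %6 is the contraction over the packed operands: each 16x2 LHS tile multiplies a 16x2 RHS tile, accumulating into a 16x16 i32 tile. Assuming the standard mmt4d semantics (LHS indexed as M1xK1xm0xk0, RHS as N1xK1xn0xk0, accumulator as M1xN1xm0xn0), a NumPy einsum sketch showing that unpacking the result recovers an ordinary 256x256 matmul (illustrative only, not part of the compiler output):

import numpy as np

rng = np.random.default_rng(0)
lhs = rng.integers(-8, 8, size=(16, 128, 16, 2)).astype(np.int32)   # M1 x K1 x m0 x k0
rhs = rng.integers(-8, 8, size=(16, 128, 16, 2)).astype(np.int32)   # N1 x K1 x n0 x k0

# mmt4d: acc[m1, n1, m0, n0] = sum over k1, k0 of lhs[m1, k1, m0, k0] * rhs[n1, k1, n0, k0]
acc = np.einsum('mkac,nkbc->mnab', lhs, rhs)                         # 16x16x16x16

# Undo the packs shown above to compare against a plain matmul.
a = lhs.transpose(0, 2, 1, 3).reshape(256, 256)    # input0 layout
b = rhs.transpose(1, 3, 0, 2).reshape(256, 256)    # input1 layout (RHS was packed transposed)
c = acc.transpose(0, 2, 1, 3).reshape(256, 256)    # unpack of the accumulator
assert np.array_equal(c, a @ b)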
// -----// IR Dump After CaptureDynamicDimsPass (iree-flow-capture-dynamic-dims) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = flow.dispatch.workgroups(%0) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> = | |
(%arg5: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>>) { | |
%11 = flow.dispatch.tensor.load %arg5, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%12 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %11 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %12 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %arg6, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
flow.return | |
} count() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
%4 = flow.dispatch.workgroups(%1) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> = | |
(%arg5: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>>) { | |
%11 = flow.dispatch.tensor.load %arg5, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%12 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %11 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %12 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %arg6, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
flow.return | |
} count() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
%5 = flow.dispatch.workgroups(%2) : (tensor<256x256xi32>) -> tensor<16x16x16x16xi32> = | |
(%arg5: !flow.dispatch.tensor<readonly:tensor<256x256xi32>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>>) { | |
%11 = flow.dispatch.tensor.load %arg5, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi32>> -> tensor<256x256xi32> | |
%12 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack = tensor.pack %11 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %12 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %pack, %arg6, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>> | |
flow.return | |
} count() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
%6 = flow.dispatch.workgroups(%3, %4, %5) : (tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>, tensor<16x16x16x16xi32>) -> %5 = | |
(%arg5: !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>>, %arg6: !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>>, %arg7: !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>>) { | |
%11 = flow.dispatch.tensor.load %arg5, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%12 = flow.dispatch.tensor.load %arg6, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%13 = flow.dispatch.tensor.load %arg7, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%14 = linalg.mmt4d ins(%11, %12 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%13 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %14, %arg7, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> | |
flow.return | |
} count() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
%7 = flow.dispatch.workgroups(%0) : (tensor<256x256xi8>) -> tensor<256xi32> = | |
(%arg5: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<256xi32>>) { | |
%c0_i32 = arith.constant 0 : i32 | |
%11 = flow.dispatch.tensor.load %arg5, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%12 = tensor.empty() : tensor<256xi32> | |
%13 = linalg.fill ins(%c0_i32 : i32) outs(%12 : tensor<256xi32>) -> tensor<256xi32> | |
%14 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%11 : tensor<256x256xi8>) outs(%13 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %14, %arg6, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
flow.return | |
} count() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
%8 = flow.dispatch.workgroups(%1) : (tensor<256x256xi8>) -> tensor<256xi32> = | |
(%arg5: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<256xi32>>) { | |
%c0_i32 = arith.constant 0 : i32 | |
%11 = flow.dispatch.tensor.load %arg5, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%12 = tensor.empty() : tensor<256xi32> | |
%13 = linalg.fill ins(%c0_i32 : i32) outs(%12 : tensor<256xi32>) -> tensor<256xi32> | |
%14 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%11 : tensor<256x256xi8>) outs(%13 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %14, %arg6, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
flow.return | |
} count() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
%9 = flow.dispatch.workgroups(%arg2, %arg3, %6, %7, %8) : (i32, i32, tensor<16x16x16x16xi32>, tensor<256xi32>, tensor<256xi32>) -> tensor<256x256xi32> = | |
(%arg5: i32, %arg6: i32, %arg7: !flow.dispatch.tensor<readonly:tensor<16x16x16x16xi32>>, %arg8: !flow.dispatch.tensor<readonly:tensor<256xi32>>, %arg9: !flow.dispatch.tensor<readonly:tensor<256xi32>>, %arg10: !flow.dispatch.tensor<writeonly:tensor<256x256xi32>>) { | |
%c256_i32 = arith.constant 256 : i32 | |
%11 = flow.dispatch.tensor.load %arg7, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%12 = flow.dispatch.tensor.load %arg8, offsets = [0], sizes = [256], strides = [1] : !flow.dispatch.tensor<readonly:tensor<256xi32>> -> tensor<256xi32> | |
%13 = flow.dispatch.tensor.load %arg9, offsets = [0], sizes = [256], strides = [1] : !flow.dispatch.tensor<readonly:tensor<256xi32>> -> tensor<256xi32> | |
%14 = arith.muli %arg5, %arg6 : i32 | |
%15 = tensor.empty() : tensor<256x256xi32> | |
%16 = arith.muli %14, %c256_i32 : i32 | |
%unpack = tensor.unpack %11 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %15 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%17 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%unpack, %12, %arg6, %13, %arg5, %16 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%15 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_0: i32, %in_1: i32, %in_2: i32, %in_3: i32, %in_4: i32, %out: i32): | |
%18 = arith.muli %in_0, %in_1 : i32 | |
%19 = arith.subi %in, %18 : i32 | |
%20 = arith.muli %in_2, %in_3 : i32 | |
%21 = arith.subi %19, %20 : i32 | |
%22 = arith.addi %21, %in_4 : i32 | |
linalg.yield %22 : i32 | |
} -> tensor<256x256xi32> | |
flow.dispatch.tensor.store %17, %arg10, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : tensor<256x256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256x256xi32>> | |
flow.return | |
} count() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
%10 = hal.tensor.export %9 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = flow.dispatch.workgroups(%0) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> = | |
(%arg5: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>>) { | |
%11 = flow.dispatch.tensor.load %arg5, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%12 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %11 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %12 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %arg6, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
flow.return | |
} count() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
%4 = flow.dispatch.workgroups(%1) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> = | |
(%arg5: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>>) { | |
%11 = flow.dispatch.tensor.load %arg5, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%12 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %11 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %12 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %arg6, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
flow.return | |
} count() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
%5 = flow.dispatch.workgroups(%2) : (tensor<256x256xi32>) -> tensor<16x16x16x16xi32> = | |
(%arg5: !flow.dispatch.tensor<readonly:tensor<256x256xi32>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>>) { | |
%11 = flow.dispatch.tensor.load %arg5, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi32>> -> tensor<256x256xi32> | |
%12 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack = tensor.pack %11 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %12 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %pack, %arg6, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>> | |
flow.return | |
} count() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
%6 = flow.dispatch.workgroups(%3, %4, %5) : (tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>, tensor<16x16x16x16xi32>) -> %5 = | |
(%arg5: !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>>, %arg6: !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>>, %arg7: !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>>) { | |
%11 = flow.dispatch.tensor.load %arg5, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%12 = flow.dispatch.tensor.load %arg6, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%13 = flow.dispatch.tensor.load %arg7, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%14 = linalg.mmt4d ins(%11, %12 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%13 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %14, %arg7, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> | |
flow.return | |
} count() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
%7 = flow.dispatch.workgroups(%0) : (tensor<256x256xi8>) -> tensor<256xi32> = | |
(%arg5: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<256xi32>>) { | |
%c0_i32 = arith.constant 0 : i32 | |
%11 = flow.dispatch.tensor.load %arg5, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%12 = tensor.empty() : tensor<256xi32> | |
%13 = linalg.fill ins(%c0_i32 : i32) outs(%12 : tensor<256xi32>) -> tensor<256xi32> | |
%14 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%11 : tensor<256x256xi8>) outs(%13 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %14, %arg6, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
flow.return | |
} count() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
%8 = flow.dispatch.workgroups(%1) : (tensor<256x256xi8>) -> tensor<256xi32> = | |
(%arg5: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<256xi32>>) { | |
%c0_i32 = arith.constant 0 : i32 | |
%11 = flow.dispatch.tensor.load %arg5, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%12 = tensor.empty() : tensor<256xi32> | |
%13 = linalg.fill ins(%c0_i32 : i32) outs(%12 : tensor<256xi32>) -> tensor<256xi32> | |
%14 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%11 : tensor<256x256xi8>) outs(%13 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %14, %arg6, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
flow.return | |
} count() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
%9 = flow.dispatch.workgroups(%arg2, %arg3, %6, %7, %8) : (i32, i32, tensor<16x16x16x16xi32>, tensor<256xi32>, tensor<256xi32>) -> tensor<256x256xi32> = | |
(%arg5: i32, %arg6: i32, %arg7: !flow.dispatch.tensor<readonly:tensor<16x16x16x16xi32>>, %arg8: !flow.dispatch.tensor<readonly:tensor<256xi32>>, %arg9: !flow.dispatch.tensor<readonly:tensor<256xi32>>, %arg10: !flow.dispatch.tensor<writeonly:tensor<256x256xi32>>) { | |
%c256_i32 = arith.constant 256 : i32 | |
%11 = flow.dispatch.tensor.load %arg7, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%12 = flow.dispatch.tensor.load %arg8, offsets = [0], sizes = [256], strides = [1] : !flow.dispatch.tensor<readonly:tensor<256xi32>> -> tensor<256xi32> | |
%13 = flow.dispatch.tensor.load %arg9, offsets = [0], sizes = [256], strides = [1] : !flow.dispatch.tensor<readonly:tensor<256xi32>> -> tensor<256xi32> | |
%14 = arith.muli %arg5, %arg6 : i32 | |
%15 = tensor.empty() : tensor<256x256xi32> | |
%16 = arith.muli %14, %c256_i32 : i32 | |
%unpack = tensor.unpack %11 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %15 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%17 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%unpack, %12, %arg6, %13, %arg5, %16 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%15 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_0: i32, %in_1: i32, %in_2: i32, %in_3: i32, %in_4: i32, %out: i32): | |
%18 = arith.muli %in_0, %in_1 : i32 | |
%19 = arith.subi %in, %18 : i32 | |
%20 = arith.muli %in_2, %in_3 : i32 | |
%21 = arith.subi %19, %20 : i32 | |
%22 = arith.addi %21, %in_4 : i32 | |
linalg.yield %22 : i32 | |
} -> tensor<256x256xi32> | |
flow.dispatch.tensor.store %17, %arg10, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : tensor<256x256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256x256xi32>> | |
flow.return | |
} count() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
%10 = hal.tensor.export %9 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = flow.dispatch.workgroups(%0) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> = | |
(%arg5: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>>) { | |
%11 = flow.dispatch.tensor.load %arg5, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%12 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %11 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %12 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %arg6, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
flow.return | |
} count() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
%4 = flow.dispatch.workgroups(%1) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> = | |
(%arg5: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>>) { | |
%11 = flow.dispatch.tensor.load %arg5, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%12 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %11 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %12 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %arg6, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
flow.return | |
} count() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
%5 = flow.dispatch.workgroups(%2) : (tensor<256x256xi32>) -> tensor<16x16x16x16xi32> = | |
(%arg5: !flow.dispatch.tensor<readonly:tensor<256x256xi32>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>>) { | |
%11 = flow.dispatch.tensor.load %arg5, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi32>> -> tensor<256x256xi32> | |
%12 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack = tensor.pack %11 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %12 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %pack, %arg6, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>> | |
flow.return | |
} count() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
%6 = flow.dispatch.workgroups(%3, %4, %5) : (tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>, tensor<16x16x16x16xi32>) -> %5 = | |
(%arg5: !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>>, %arg6: !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>>, %arg7: !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>>) { | |
%11 = flow.dispatch.tensor.load %arg5, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%12 = flow.dispatch.tensor.load %arg6, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%13 = flow.dispatch.tensor.load %arg7, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%14 = linalg.mmt4d ins(%11, %12 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%13 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %14, %arg7, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> | |
flow.return | |
} count() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
%7 = flow.dispatch.workgroups(%0) : (tensor<256x256xi8>) -> tensor<256xi32> = | |
(%arg5: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<256xi32>>) { | |
%c0_i32 = arith.constant 0 : i32 | |
%11 = flow.dispatch.tensor.load %arg5, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%12 = tensor.empty() : tensor<256xi32> | |
%13 = linalg.fill ins(%c0_i32 : i32) outs(%12 : tensor<256xi32>) -> tensor<256xi32> | |
%14 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%11 : tensor<256x256xi8>) outs(%13 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %14, %arg6, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
flow.return | |
} count() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
%8 = flow.dispatch.workgroups(%1) : (tensor<256x256xi8>) -> tensor<256xi32> = | |
(%arg5: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<256xi32>>) { | |
%c0_i32 = arith.constant 0 : i32 | |
%11 = flow.dispatch.tensor.load %arg5, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%12 = tensor.empty() : tensor<256xi32> | |
%13 = linalg.fill ins(%c0_i32 : i32) outs(%12 : tensor<256xi32>) -> tensor<256xi32> | |
%14 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%11 : tensor<256x256xi8>) outs(%13 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %14, %arg6, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
flow.return | |
} count() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
%9 = flow.dispatch.workgroups(%arg2, %arg3, %6, %7, %8) : (i32, i32, tensor<16x16x16x16xi32>, tensor<256xi32>, tensor<256xi32>) -> tensor<256x256xi32> = | |
(%arg5: i32, %arg6: i32, %arg7: !flow.dispatch.tensor<readonly:tensor<16x16x16x16xi32>>, %arg8: !flow.dispatch.tensor<readonly:tensor<256xi32>>, %arg9: !flow.dispatch.tensor<readonly:tensor<256xi32>>, %arg10: !flow.dispatch.tensor<writeonly:tensor<256x256xi32>>) { | |
%c256_i32 = arith.constant 256 : i32 | |
%11 = flow.dispatch.tensor.load %arg7, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%12 = flow.dispatch.tensor.load %arg8, offsets = [0], sizes = [256], strides = [1] : !flow.dispatch.tensor<readonly:tensor<256xi32>> -> tensor<256xi32> | |
%13 = flow.dispatch.tensor.load %arg9, offsets = [0], sizes = [256], strides = [1] : !flow.dispatch.tensor<readonly:tensor<256xi32>> -> tensor<256xi32> | |
%14 = arith.muli %arg5, %arg6 : i32 | |
%15 = tensor.empty() : tensor<256x256xi32> | |
%16 = arith.muli %14, %c256_i32 : i32 | |
%unpack = tensor.unpack %11 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %15 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%17 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%unpack, %12, %arg6, %13, %arg5, %16 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%15 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_0: i32, %in_1: i32, %in_2: i32, %in_3: i32, %in_4: i32, %out: i32): | |
%18 = arith.muli %in_0, %in_1 : i32 | |
%19 = arith.subi %in, %18 : i32 | |
%20 = arith.muli %in_2, %in_3 : i32 | |
%21 = arith.subi %19, %20 : i32 | |
%22 = arith.addi %21, %in_4 : i32 | |
linalg.yield %22 : i32 | |
} -> tensor<256x256xi32> | |
flow.dispatch.tensor.store %17, %arg10, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : tensor<256x256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256x256xi32>> | |
flow.return | |
} count() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
%10 = hal.tensor.export %9 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After InitializeEmptyTensorsPass (iree-flow-initialize-empty-tensors) //----- // | |
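// NOTE: This dump shows the fully formed dispatch graph: two i8 pack dispatches (LHS packed [0, 1] x [16, 2], RHS packed [1, 0] x [16, 2]), an i32 pack of the accumulator into 16x16x16x16 tiles, the linalg.mmt4d matmul on the packed operands, two i8->i32 reductions computing the row sums of input0 and the column sums of input1, and a final unpack + elementwise dispatch applying the zero-point correction (acc - rowsum * zp1 - colsum * zp0 + 256 * zp0 * zp1).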
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = flow.dispatch.workgroups(%0) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> = | |
(%arg5: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>>) { | |
%11 = flow.dispatch.tensor.load %arg5, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%12 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %11 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %12 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %arg6, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
flow.return | |
} count() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
%4 = flow.dispatch.workgroups(%1) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> = | |
(%arg5: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>>) { | |
%11 = flow.dispatch.tensor.load %arg5, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%12 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %11 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %12 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %arg6, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
flow.return | |
} count() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
%5 = flow.dispatch.workgroups(%2) : (tensor<256x256xi32>) -> tensor<16x16x16x16xi32> = | |
(%arg5: !flow.dispatch.tensor<readonly:tensor<256x256xi32>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>>) { | |
%11 = flow.dispatch.tensor.load %arg5, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi32>> -> tensor<256x256xi32> | |
%12 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack = tensor.pack %11 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %12 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %pack, %arg6, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>> | |
flow.return | |
} count() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
%6 = flow.dispatch.workgroups(%3, %4, %5) : (tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>, tensor<16x16x16x16xi32>) -> %5 = | |
(%arg5: !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>>, %arg6: !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>>, %arg7: !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>>) { | |
%11 = flow.dispatch.tensor.load %arg5, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%12 = flow.dispatch.tensor.load %arg6, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%13 = flow.dispatch.tensor.load %arg7, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%14 = linalg.mmt4d ins(%11, %12 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%13 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %14, %arg7, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> | |
flow.return | |
} count() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
%7 = flow.dispatch.workgroups(%0) : (tensor<256x256xi8>) -> tensor<256xi32> = | |
(%arg5: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<256xi32>>) { | |
%c0_i32 = arith.constant 0 : i32 | |
%11 = flow.dispatch.tensor.load %arg5, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%12 = tensor.empty() : tensor<256xi32> | |
%13 = linalg.fill ins(%c0_i32 : i32) outs(%12 : tensor<256xi32>) -> tensor<256xi32> | |
%14 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%11 : tensor<256x256xi8>) outs(%13 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %14, %arg6, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
flow.return | |
} count() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
%8 = flow.dispatch.workgroups(%1) : (tensor<256x256xi8>) -> tensor<256xi32> = | |
(%arg5: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<256xi32>>) { | |
%c0_i32 = arith.constant 0 : i32 | |
%11 = flow.dispatch.tensor.load %arg5, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%12 = tensor.empty() : tensor<256xi32> | |
%13 = linalg.fill ins(%c0_i32 : i32) outs(%12 : tensor<256xi32>) -> tensor<256xi32> | |
%14 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%11 : tensor<256x256xi8>) outs(%13 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %14, %arg6, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
flow.return | |
} count() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
%9 = flow.dispatch.workgroups(%arg2, %arg3, %6, %7, %8) : (i32, i32, tensor<16x16x16x16xi32>, tensor<256xi32>, tensor<256xi32>) -> tensor<256x256xi32> = | |
(%arg5: i32, %arg6: i32, %arg7: !flow.dispatch.tensor<readonly:tensor<16x16x16x16xi32>>, %arg8: !flow.dispatch.tensor<readonly:tensor<256xi32>>, %arg9: !flow.dispatch.tensor<readonly:tensor<256xi32>>, %arg10: !flow.dispatch.tensor<writeonly:tensor<256x256xi32>>) { | |
%c256_i32 = arith.constant 256 : i32 | |
%11 = flow.dispatch.tensor.load %arg7, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%12 = flow.dispatch.tensor.load %arg8, offsets = [0], sizes = [256], strides = [1] : !flow.dispatch.tensor<readonly:tensor<256xi32>> -> tensor<256xi32> | |
%13 = flow.dispatch.tensor.load %arg9, offsets = [0], sizes = [256], strides = [1] : !flow.dispatch.tensor<readonly:tensor<256xi32>> -> tensor<256xi32> | |
%14 = arith.muli %arg5, %arg6 : i32 | |
%15 = tensor.empty() : tensor<256x256xi32> | |
%16 = arith.muli %14, %c256_i32 : i32 | |
%unpack = tensor.unpack %11 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %15 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%17 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%unpack, %12, %arg6, %13, %arg5, %16 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%15 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_0: i32, %in_1: i32, %in_2: i32, %in_3: i32, %in_4: i32, %out: i32): | |
%18 = arith.muli %in_0, %in_1 : i32 | |
%19 = arith.subi %in, %18 : i32 | |
%20 = arith.muli %in_2, %in_3 : i32 | |
%21 = arith.subi %19, %20 : i32 | |
%22 = arith.addi %21, %in_4 : i32 | |
linalg.yield %22 : i32 | |
} -> tensor<256x256xi32> | |
flow.dispatch.tensor.store %17, %arg10, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : tensor<256x256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256x256xi32>> | |
flow.return | |
} count() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
%10 = hal.tensor.export %9 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After OutlineDispatchExternsPass (iree-flow-outline-dispatch-externs) //----- // | |
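// NOTE: The #hal.executable.target attribute here records the concrete znver4 cpu_features string, native_vector_size = 64, and the x86_64-unknown-unknown-eabi-elf target triple.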
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-amx-fp8,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,-movrs,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,+evex512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-sha512,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,+gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,-nf,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-amx-transpose,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1) -> (d0, d1)> | |
#map1 = affine_map<(d0, d1) -> (d0)> | |
#map2 = affine_map<(d0, d1) -> (d1, d0)> | |
#map3 = affine_map<(d0, d1) -> ()> | |
#map4 = affine_map<(d0, d1) -> (d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = flow.dispatch.workgroups(%0) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> = | |
(%arg5: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>>) { | |
%11 = flow.dispatch.tensor.load %arg5, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%12 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %11 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %12 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %arg6, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
flow.return | |
} count() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
%4 = flow.dispatch.workgroups(%1) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> = | |
(%arg5: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>>) { | |
%11 = flow.dispatch.tensor.load %arg5, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%12 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %11 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %12 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %arg6, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
flow.return | |
} count() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
%5 = flow.dispatch.workgroups(%2) : (tensor<256x256xi32>) -> tensor<16x16x16x16xi32> = | |
(%arg5: !flow.dispatch.tensor<readonly:tensor<256x256xi32>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>>) { | |
%11 = flow.dispatch.tensor.load %arg5, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi32>> -> tensor<256x256xi32> | |
%12 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack = tensor.pack %11 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %12 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %pack, %arg6, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>> | |
flow.return | |
} count() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
%6 = flow.dispatch.workgroups(%3, %4, %5) : (tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>, tensor<16x16x16x16xi32>) -> %5 = | |
(%arg5: !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>>, %arg6: !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>>, %arg7: !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>>) { | |
%11 = flow.dispatch.tensor.load %arg5, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%12 = flow.dispatch.tensor.load %arg6, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%13 = flow.dispatch.tensor.load %arg7, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%14 = linalg.mmt4d ins(%11, %12 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%13 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %14, %arg7, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> | |
flow.return | |
} count() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
%7 = flow.dispatch.workgroups(%0) : (tensor<256x256xi8>) -> tensor<256xi32> = | |
(%arg5: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<256xi32>>) { | |
%c0_i32 = arith.constant 0 : i32 | |
%11 = flow.dispatch.tensor.load %arg5, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%12 = tensor.empty() : tensor<256xi32> | |
%13 = linalg.fill ins(%c0_i32 : i32) outs(%12 : tensor<256xi32>) -> tensor<256xi32> | |
%14 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "reduction"]} ins(%11 : tensor<256x256xi8>) outs(%13 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %14, %arg6, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
flow.return | |
} count() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
%8 = flow.dispatch.workgroups(%1) : (tensor<256x256xi8>) -> tensor<256xi32> = | |
(%arg5: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<256xi32>>) { | |
%c0_i32 = arith.constant 0 : i32 | |
%11 = flow.dispatch.tensor.load %arg5, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%12 = tensor.empty() : tensor<256xi32> | |
%13 = linalg.fill ins(%c0_i32 : i32) outs(%12 : tensor<256xi32>) -> tensor<256xi32> | |
%14 = linalg.generic {indexing_maps = [#map2, #map1], iterator_types = ["parallel", "reduction"]} ins(%11 : tensor<256x256xi8>) outs(%13 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%15 = arith.extsi %in : i8 to i32 | |
%16 = arith.addi %15, %out : i32 | |
linalg.yield %16 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %14, %arg6, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
flow.return | |
} count() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
%9 = flow.dispatch.workgroups(%arg2, %arg3, %6, %7, %8) : (i32, i32, tensor<16x16x16x16xi32>, tensor<256xi32>, tensor<256xi32>) -> tensor<256x256xi32> = | |
(%arg5: i32, %arg6: i32, %arg7: !flow.dispatch.tensor<readonly:tensor<16x16x16x16xi32>>, %arg8: !flow.dispatch.tensor<readonly:tensor<256xi32>>, %arg9: !flow.dispatch.tensor<readonly:tensor<256xi32>>, %arg10: !flow.dispatch.tensor<writeonly:tensor<256x256xi32>>) { | |
%c256_i32 = arith.constant 256 : i32 | |
%11 = flow.dispatch.tensor.load %arg7, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%12 = flow.dispatch.tensor.load %arg8, offsets = [0], sizes = [256], strides = [1] : !flow.dispatch.tensor<readonly:tensor<256xi32>> -> tensor<256xi32> | |
%13 = flow.dispatch.tensor.load %arg9, offsets = [0], sizes = [256], strides = [1] : !flow.dispatch.tensor<readonly:tensor<256xi32>> -> tensor<256xi32> | |
%14 = arith.muli %arg5, %arg6 : i32 | |
%15 = tensor.empty() : tensor<256x256xi32> | |
%16 = arith.muli %14, %c256_i32 : i32 | |
%unpack = tensor.unpack %11 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %15 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%17 = linalg.generic {indexing_maps = [#map, #map1, #map3, #map4, #map3, #map3, #map], iterator_types = ["parallel", "parallel"]} ins(%unpack, %12, %arg6, %13, %arg5, %16 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%15 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_0: i32, %in_1: i32, %in_2: i32, %in_3: i32, %in_4: i32, %out: i32): | |
%18 = arith.muli %in_0, %in_1 : i32 | |
%19 = arith.subi %in, %18 : i32 | |
%20 = arith.muli %in_2, %in_3 : i32 | |
%21 = arith.subi %19, %20 : i32 | |
%22 = arith.addi %21, %in_4 : i32 | |
linalg.yield %22 : i32 | |
} -> tensor<256x256xi32> | |
flow.dispatch.tensor.store %17, %arg10, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : tensor<256x256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256x256xi32>> | |
flow.return | |
} count() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
%10 = hal.tensor.export %9 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After OutlineDispatchRegionsPass (iree-flow-outline-dispatch-regions) //----- // | |
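// NOTE: OutlineDispatchRegionsPass moves each flow.dispatch.workgroups region into its own private flow.executable (dispatch_0 through dispatch_6) with a public flow.executable.export entry point; the main util.func now reaches them through flow.dispatch calls.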
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-amx-fp8,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,-movrs,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,+evex512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-sha512,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,+gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,-nf,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-amx-transpose,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1) -> (d0, d1)> | |
#map1 = affine_map<(d0, d1) -> (d0)> | |
#map2 = affine_map<(d0, d1) -> (d1, d0)> | |
#map3 = affine_map<(d0, d1) -> ()> | |
#map4 = affine_map<(d0, d1) -> (d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_0 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_0 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_0(%arg0: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg1: !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>>) { | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%1 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %1 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %arg1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
return | |
} | |
} | |
} | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_1 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_1 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_1(%arg0: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg1: !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>>) { | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%1 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %1 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %arg1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
return | |
} | |
} | |
} | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_2 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_2 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_2(%arg0: !flow.dispatch.tensor<readonly:tensor<256x256xi32>>, %arg1: !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>>) { | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi32>> -> tensor<256x256xi32> | |
%1 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %1 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %pack, %arg1, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>> | |
return | |
} | |
} | |
} | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_3 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_3 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_3(%arg0: !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>>, %arg1: !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>>, %arg2: !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>>) { | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%2 = flow.dispatch.tensor.load %arg2, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%3 = linalg.mmt4d ins(%0, %1 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%2 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %3, %arg2, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> | |
return | |
} | |
} | |
} | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_4 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_4 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_4(%arg0: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg1: !flow.dispatch.tensor<writeonly:tensor<256xi32>>) { | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%1 = tensor.empty() : tensor<256xi32> | |
%2 = linalg.fill ins(%c0_i32 : i32) outs(%1 : tensor<256xi32>) -> tensor<256xi32> | |
%3 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%2 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%4 = arith.extsi %in : i8 to i32 | |
%5 = arith.addi %4, %out : i32 | |
linalg.yield %5 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %3, %arg1, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
return | |
} | |
} | |
} | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_5 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_5 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_5(%arg0: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg1: !flow.dispatch.tensor<writeonly:tensor<256xi32>>) { | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%1 = tensor.empty() : tensor<256xi32> | |
%2 = linalg.fill ins(%c0_i32 : i32) outs(%1 : tensor<256xi32>) -> tensor<256xi32> | |
%3 = linalg.generic {indexing_maps = [#map2, #map1], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%2 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%4 = arith.extsi %in : i8 to i32 | |
%5 = arith.addi %4, %out : i32 | |
linalg.yield %5 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %3, %arg1, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
return | |
} | |
} | |
} | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_6 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_6 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_6(%arg0: i32, %arg1: i32, %arg2: !flow.dispatch.tensor<readonly:tensor<16x16x16x16xi32>>, %arg3: !flow.dispatch.tensor<readonly:tensor<256xi32>>, %arg4: !flow.dispatch.tensor<readonly:tensor<256xi32>>, %arg5: !flow.dispatch.tensor<writeonly:tensor<256x256xi32>>) { | |
%c256_i32 = arith.constant 256 : i32 | |
%0 = flow.dispatch.tensor.load %arg2, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%1 = flow.dispatch.tensor.load %arg3, offsets = [0], sizes = [256], strides = [1] : !flow.dispatch.tensor<readonly:tensor<256xi32>> -> tensor<256xi32> | |
%2 = flow.dispatch.tensor.load %arg4, offsets = [0], sizes = [256], strides = [1] : !flow.dispatch.tensor<readonly:tensor<256xi32>> -> tensor<256xi32> | |
%3 = arith.muli %arg0, %arg1 : i32 | |
%4 = tensor.empty() : tensor<256x256xi32> | |
%5 = arith.muli %3, %c256_i32 : i32 | |
%unpack = tensor.unpack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %4 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%6 = linalg.generic {indexing_maps = [#map, #map1, #map3, #map4, #map3, #map3, #map], iterator_types = ["parallel", "parallel"]} ins(%unpack, %1, %arg1, %2, %arg0, %5 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%4 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_0: i32, %in_1: i32, %in_2: i32, %in_3: i32, %in_4: i32, %out: i32): | |
%7 = arith.muli %in_0, %in_1 : i32 | |
%8 = arith.subi %in, %7 : i32 | |
%9 = arith.muli %in_2, %in_3 : i32 | |
%10 = arith.subi %8, %9 : i32 | |
%11 = arith.addi %10, %in_4 : i32 | |
linalg.yield %11 : i32 | |
} -> tensor<256x256xi32> | |
flow.dispatch.tensor.store %6, %arg5, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : tensor<256x256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256x256xi32>> | |
return | |
} | |
} | |
} | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_0::@quantized_matmul_neither_zp_0_dynamic_dispatch_0(%0) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> | |
%4 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_1::@quantized_matmul_neither_zp_0_dynamic_dispatch_1(%1) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> | |
%5 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_2::@quantized_matmul_neither_zp_0_dynamic_dispatch_2(%2) : (tensor<256x256xi32>) -> tensor<16x16x16x16xi32> | |
%6 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_3::@quantized_matmul_neither_zp_0_dynamic_dispatch_3(%3, %4, %5) : (tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>, tensor<16x16x16x16xi32>) -> %5 | |
%7 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_4::@quantized_matmul_neither_zp_0_dynamic_dispatch_4(%0) : (tensor<256x256xi8>) -> tensor<256xi32> | |
%8 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_5::@quantized_matmul_neither_zp_0_dynamic_dispatch_5(%1) : (tensor<256x256xi8>) -> tensor<256xi32> | |
%9 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_6::@quantized_matmul_neither_zp_0_dynamic_dispatch_6(%arg2, %arg3, %6, %7, %8) : (i32, i32, tensor<16x16x16x16xi32>, tensor<256xi32>, tensor<256xi32>) -> tensor<256x256xi32> | |
%10 = hal.tensor.export %9 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After AnnotateDispatchesPass (iree-flow-annotate-dispatches) //----- // | |
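// NOTE: AnnotateDispatchesPass renames the exported entry points with descriptive suffixes summarizing each dispatch's root ops: _pack_i8, _pack_i32, _mmt4d_16x16x128x16x16x2_i8xi8xi32, _generic_256x256_i8xi32, and _unpack_elementwise_256x256_i32.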
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-amx-fp8,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,-movrs,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,+evex512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-sha512,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,+gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,-nf,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-amx-transpose,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1) -> (d0, d1)> | |
#map1 = affine_map<(d0, d1) -> (d0)> | |
#map2 = affine_map<(d0, d1) -> (d1, d0)> | |
#map3 = affine_map<(d0, d1) -> ()> | |
#map4 = affine_map<(d0, d1) -> (d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_0 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%arg0: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg1: !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>>) { | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%1 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %1 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %arg1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
return | |
} | |
} | |
} | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_1 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%arg0: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg1: !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>>) { | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%1 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %1 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %arg1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
return | |
} | |
} | |
} | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_2 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%arg0: !flow.dispatch.tensor<readonly:tensor<256x256xi32>>, %arg1: !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>>) { | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi32>> -> tensor<256x256xi32> | |
%1 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %1 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %pack, %arg1, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>> | |
return | |
} | |
} | |
} | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_3 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%arg0: !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>>, %arg1: !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>>, %arg2: !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>>) { | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%2 = flow.dispatch.tensor.load %arg2, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%3 = linalg.mmt4d ins(%0, %1 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%2 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %3, %arg2, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> | |
return | |
} | |
} | |
} | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_4 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%arg0: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg1: !flow.dispatch.tensor<writeonly:tensor<256xi32>>) { | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%1 = tensor.empty() : tensor<256xi32> | |
%2 = linalg.fill ins(%c0_i32 : i32) outs(%1 : tensor<256xi32>) -> tensor<256xi32> | |
%3 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%2 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%4 = arith.extsi %in : i8 to i32 | |
%5 = arith.addi %4, %out : i32 | |
linalg.yield %5 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %3, %arg1, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
return | |
} | |
} | |
} | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_5 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%arg0: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg1: !flow.dispatch.tensor<writeonly:tensor<256xi32>>) { | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%1 = tensor.empty() : tensor<256xi32> | |
%2 = linalg.fill ins(%c0_i32 : i32) outs(%1 : tensor<256xi32>) -> tensor<256xi32> | |
%3 = linalg.generic {indexing_maps = [#map2, #map1], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%2 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%4 = arith.extsi %in : i8 to i32 | |
%5 = arith.addi %4, %out : i32 | |
linalg.yield %5 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %3, %arg1, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
return | |
} | |
} | |
} | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_6 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg0: i32, %arg1: i32, %arg2: !flow.dispatch.tensor<readonly:tensor<16x16x16x16xi32>>, %arg3: !flow.dispatch.tensor<readonly:tensor<256xi32>>, %arg4: !flow.dispatch.tensor<readonly:tensor<256xi32>>, %arg5: !flow.dispatch.tensor<writeonly:tensor<256x256xi32>>) { | |
%c256_i32 = arith.constant 256 : i32 | |
%0 = flow.dispatch.tensor.load %arg2, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%1 = flow.dispatch.tensor.load %arg3, offsets = [0], sizes = [256], strides = [1] : !flow.dispatch.tensor<readonly:tensor<256xi32>> -> tensor<256xi32> | |
%2 = flow.dispatch.tensor.load %arg4, offsets = [0], sizes = [256], strides = [1] : !flow.dispatch.tensor<readonly:tensor<256xi32>> -> tensor<256xi32> | |
%3 = arith.muli %arg0, %arg1 : i32 | |
%4 = tensor.empty() : tensor<256x256xi32> | |
%5 = arith.muli %3, %c256_i32 : i32 | |
%unpack = tensor.unpack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %4 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%6 = linalg.generic {indexing_maps = [#map, #map1, #map3, #map4, #map3, #map3, #map], iterator_types = ["parallel", "parallel"]} ins(%unpack, %1, %arg1, %2, %arg0, %5 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%4 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_0: i32, %in_1: i32, %in_2: i32, %in_3: i32, %in_4: i32, %out: i32): | |
%7 = arith.muli %in_0, %in_1 : i32 | |
%8 = arith.subi %in, %7 : i32 | |
%9 = arith.muli %in_2, %in_3 : i32 | |
%10 = arith.subi %8, %9 : i32 | |
%11 = arith.addi %10, %in_4 : i32 | |
linalg.yield %11 : i32 | |
} -> tensor<256x256xi32> | |
flow.dispatch.tensor.store %6, %arg5, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : tensor<256x256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256x256xi32>> | |
return | |
} | |
} | |
} | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_0::@quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%0) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> | |
%4 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_1::@quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%1) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> | |
%5 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_2::@quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%2) : (tensor<256x256xi32>) -> tensor<16x16x16x16xi32> | |
%6 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_3::@quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%3, %4, %5) : (tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>, tensor<16x16x16x16xi32>) -> %5 | |
%7 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_4::@quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%0) : (tensor<256x256xi8>) -> tensor<256xi32> | |
%8 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_5::@quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%1) : (tensor<256x256xi8>) -> tensor<256xi32> | |
%9 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_6::@quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg2, %arg3, %6, %7, %8) : (i32, i32, tensor<16x16x16x16xi32>, tensor<256xi32>, tensor<256xi32>) -> tensor<256x256xi32> | |
%10 = hal.tensor.export %9 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After StripDebugOps (iree-util-strip-debug-ops) //----- // | |
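// NOTE: The StripDebugOps dumps below are printed once per flow.executable rather than for the whole module.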
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_2 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%arg0: !flow.dispatch.tensor<readonly:tensor<256x256xi32>>, %arg1: !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>>) { | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi32>> -> tensor<256x256xi32> | |
%1 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %1 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %pack, %arg1, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>> | |
return | |
} | |
} | |
} | |
// -----// IR Dump After StripDebugOps (iree-util-strip-debug-ops) //----- // | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_1 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%arg0: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg1: !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>>) { | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%1 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %1 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %arg1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
return | |
} | |
} | |
} | |
// -----// IR Dump After StripDebugOps (iree-util-strip-debug-ops) //----- // | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_6 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg0: i32, %arg1: i32, %arg2: !flow.dispatch.tensor<readonly:tensor<16x16x16x16xi32>>, %arg3: !flow.dispatch.tensor<readonly:tensor<256xi32>>, %arg4: !flow.dispatch.tensor<readonly:tensor<256xi32>>, %arg5: !flow.dispatch.tensor<writeonly:tensor<256x256xi32>>) { | |
%c256_i32 = arith.constant 256 : i32 | |
%0 = flow.dispatch.tensor.load %arg2, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%1 = flow.dispatch.tensor.load %arg3, offsets = [0], sizes = [256], strides = [1] : !flow.dispatch.tensor<readonly:tensor<256xi32>> -> tensor<256xi32> | |
%2 = flow.dispatch.tensor.load %arg4, offsets = [0], sizes = [256], strides = [1] : !flow.dispatch.tensor<readonly:tensor<256xi32>> -> tensor<256xi32> | |
%3 = arith.muli %arg0, %arg1 : i32 | |
%4 = tensor.empty() : tensor<256x256xi32> | |
%5 = arith.muli %3, %c256_i32 : i32 | |
%unpack = tensor.unpack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %4 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%6 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%unpack, %1, %arg1, %2, %arg0, %5 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%4 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_0: i32, %in_1: i32, %in_2: i32, %in_3: i32, %in_4: i32, %out: i32): | |
%7 = arith.muli %in_0, %in_1 : i32 | |
%8 = arith.subi %in, %7 : i32 | |
%9 = arith.muli %in_2, %in_3 : i32 | |
%10 = arith.subi %8, %9 : i32 | |
%11 = arith.addi %10, %in_4 : i32 | |
linalg.yield %11 : i32 | |
} -> tensor<256x256xi32> | |
flow.dispatch.tensor.store %6, %arg5, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : tensor<256x256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256x256xi32>> | |
return | |
} | |
} | |
} | |
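// dispatch_6 unpacks the tiled accumulator back to 256x256xi32 and applies the
// zero-point correction of the quantized matmul:
//   out[i, j] = acc[i, j] - rowsum_input0[i] * %input3 - colsum_input1[j] * %input2
//               + 256 * %input2 * %input3
// where rowsum_input0 is produced by dispatch_4 and colsum_input1 by dispatch_5
// (see the entry point below); this is the usual expansion of
// (A - zp_A) * (B - zp_B) around the plain i8 matmul accumulator.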
// -----// IR Dump After StripDebugOps (iree-util-strip-debug-ops) //----- // | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_3 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%arg0: !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>>, %arg1: !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>>, %arg2: !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>>) { | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%2 = flow.dispatch.tensor.load %arg2, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%3 = linalg.mmt4d ins(%0, %1 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%2 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %3, %arg2, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> | |
return | |
} | |
} | |
} | |
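// dispatch_3 is the packed matmul kernel: linalg.mmt4d over 16x128x16x2 i8 tiles
// accumulating into 16x16x16x16 i32 tiles (M0 = N0 = 16, K0 = 2). The K0 = 2
// inner dimension is presumably chosen so the i8 dot products line up with the
// AVX-512 dot-product style instructions (+avx512vnni) in the target's cpu_features.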
// -----// IR Dump After StripDebugOps (iree-util-strip-debug-ops) //----- // | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_5 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%arg0: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg1: !flow.dispatch.tensor<writeonly:tensor<256xi32>>) { | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%1 = tensor.empty() : tensor<256xi32> | |
%2 = linalg.fill ins(%c0_i32 : i32) outs(%1 : tensor<256xi32>) -> tensor<256xi32> | |
%3 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%2 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%4 = arith.extsi %in : i8 to i32 | |
%5 = arith.addi %4, %out : i32 | |
linalg.yield %5 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %3, %arg1, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
return | |
} | |
} | |
} | |
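// dispatch_5: reduction with input map (d0, d1) -> (d1, d0), so out[d0] is the sum
// over d1 of in[d1, d0], i.e. per-column sums of the sign-extended i8 input.
// The entry point feeds input1 (the RHS) into this dispatch.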
// -----// IR Dump After StripDebugOps (iree-util-strip-debug-ops) //----- // | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_4 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%arg0: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg1: !flow.dispatch.tensor<writeonly:tensor<256xi32>>) { | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%1 = tensor.empty() : tensor<256xi32> | |
%2 = linalg.fill ins(%c0_i32 : i32) outs(%1 : tensor<256xi32>) -> tensor<256xi32> | |
%3 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%2 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%4 = arith.extsi %in : i8 to i32 | |
%5 = arith.addi %4, %out : i32 | |
linalg.yield %5 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %3, %arg1, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
return | |
} | |
} | |
} | |
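// dispatch_4 is the matching per-row reduction: out[d0] = sum over d1 of the
// sign-extended in[d0, d1]. The entry point feeds input0 (the LHS) into it, and
// dispatch_6 consumes the resulting row sums for the zero-point correction.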
// -----// IR Dump After StripDebugOps (iree-util-strip-debug-ops) //----- // | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_0 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%arg0: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg1: !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>>) { | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%1 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %1 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %arg1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
return | |
} | |
} | |
} | |
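// dispatch_0 packs the LHS i8 matrix (input0) in identity outer order with inner
// tiles [16, 2], giving the M-major [M1, K1, M0, K0] layout (M0 = 16, K0 = 2) of
// tensor<16x128x16x2xi8> consumed as the first mmt4d operand.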
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_0::@quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%0) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> | |
%4 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_1::@quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%1) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> | |
%5 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_2::@quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%2) : (tensor<256x256xi32>) -> tensor<16x16x16x16xi32> | |
%6 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_3::@quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%3, %4, %5) : (tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>, tensor<16x16x16x16xi32>) -> %5 | |
%7 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_4::@quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%0) : (tensor<256x256xi8>) -> tensor<256xi32> | |
%8 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_5::@quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%1) : (tensor<256x256xi8>) -> tensor<256xi32> | |
%9 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_6::@quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg2, %arg3, %6, %7, %8) : (i32, i32, tensor<16x16x16x16xi32>, tensor<256xi32>, tensor<256xi32>) -> tensor<256x256xi32> | |
%10 = hal.tensor.export %9 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
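// After canonicalization the entry point is a straight-line dispatch graph:
// dispatch_0/1/2 pack the LHS, RHS and i32 init, dispatch_3 runs the mmt4d and
// writes its result in place into the packed init (the tied "-> %5" result),
// dispatch_4/5 compute the row and column sums, and dispatch_6 unpacks and
// applies the zero-point correction before the result is exported as output0.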
// -----// IR Dump After DeduplicateExecutablesPass (iree-flow-deduplicate-executables) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-amx-fp8,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,-movrs,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,+evex512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-sha512,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,+gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,-nf,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-amx-transpose,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1) -> (d0, d1)> | |
#map1 = affine_map<(d0, d1) -> (d0)> | |
#map2 = affine_map<(d0, d1) -> (d1, d0)> | |
#map3 = affine_map<(d0, d1) -> ()> | |
#map4 = affine_map<(d0, d1) -> (d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_0 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%arg0: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg1: !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>>) { | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%1 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %1 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %arg1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
return | |
} | |
} | |
} | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_1 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%arg0: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg1: !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>>) { | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%1 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %1 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %arg1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
return | |
} | |
} | |
} | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_2 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%arg0: !flow.dispatch.tensor<readonly:tensor<256x256xi32>>, %arg1: !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>>) { | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi32>> -> tensor<256x256xi32> | |
%1 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %1 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %pack, %arg1, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>> | |
return | |
} | |
} | |
} | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_3 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%arg0: !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>>, %arg1: !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>>, %arg2: !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>>) { | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%2 = flow.dispatch.tensor.load %arg2, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%3 = linalg.mmt4d ins(%0, %1 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%2 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %3, %arg2, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> | |
return | |
} | |
} | |
} | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_4 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%arg0: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg1: !flow.dispatch.tensor<writeonly:tensor<256xi32>>) { | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%1 = tensor.empty() : tensor<256xi32> | |
%2 = linalg.fill ins(%c0_i32 : i32) outs(%1 : tensor<256xi32>) -> tensor<256xi32> | |
%3 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%2 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%4 = arith.extsi %in : i8 to i32 | |
%5 = arith.addi %4, %out : i32 | |
linalg.yield %5 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %3, %arg1, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
return | |
} | |
} | |
} | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_5 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%arg0: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg1: !flow.dispatch.tensor<writeonly:tensor<256xi32>>) { | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%1 = tensor.empty() : tensor<256xi32> | |
%2 = linalg.fill ins(%c0_i32 : i32) outs(%1 : tensor<256xi32>) -> tensor<256xi32> | |
%3 = linalg.generic {indexing_maps = [#map2, #map1], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%2 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%4 = arith.extsi %in : i8 to i32 | |
%5 = arith.addi %4, %out : i32 | |
linalg.yield %5 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %3, %arg1, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
return | |
} | |
} | |
} | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_6 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg0: i32, %arg1: i32, %arg2: !flow.dispatch.tensor<readonly:tensor<16x16x16x16xi32>>, %arg3: !flow.dispatch.tensor<readonly:tensor<256xi32>>, %arg4: !flow.dispatch.tensor<readonly:tensor<256xi32>>, %arg5: !flow.dispatch.tensor<writeonly:tensor<256x256xi32>>) { | |
%c256_i32 = arith.constant 256 : i32 | |
%0 = flow.dispatch.tensor.load %arg2, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%1 = flow.dispatch.tensor.load %arg3, offsets = [0], sizes = [256], strides = [1] : !flow.dispatch.tensor<readonly:tensor<256xi32>> -> tensor<256xi32> | |
%2 = flow.dispatch.tensor.load %arg4, offsets = [0], sizes = [256], strides = [1] : !flow.dispatch.tensor<readonly:tensor<256xi32>> -> tensor<256xi32> | |
%3 = arith.muli %arg0, %arg1 : i32 | |
%4 = tensor.empty() : tensor<256x256xi32> | |
%5 = arith.muli %3, %c256_i32 : i32 | |
%unpack = tensor.unpack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %4 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%6 = linalg.generic {indexing_maps = [#map, #map1, #map3, #map4, #map3, #map3, #map], iterator_types = ["parallel", "parallel"]} ins(%unpack, %1, %arg1, %2, %arg0, %5 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%4 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_0: i32, %in_1: i32, %in_2: i32, %in_3: i32, %in_4: i32, %out: i32): | |
%7 = arith.muli %in_0, %in_1 : i32 | |
%8 = arith.subi %in, %7 : i32 | |
%9 = arith.muli %in_2, %in_3 : i32 | |
%10 = arith.subi %8, %9 : i32 | |
%11 = arith.addi %10, %in_4 : i32 | |
linalg.yield %11 : i32 | |
} -> tensor<256x256xi32> | |
flow.dispatch.tensor.store %6, %arg5, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : tensor<256x256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256x256xi32>> | |
return | |
} | |
} | |
} | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_0::@quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%0) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> | |
%4 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_1::@quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%1) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> | |
%5 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_2::@quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%2) : (tensor<256x256xi32>) -> tensor<16x16x16x16xi32> | |
%6 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_3::@quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%3, %4, %5) : (tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>, tensor<16x16x16x16xi32>) -> %5 | |
%7 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_4::@quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%0) : (tensor<256x256xi8>) -> tensor<256xi32> | |
%8 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_5::@quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%1) : (tensor<256x256xi8>) -> tensor<256xi32> | |
%9 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_6::@quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg2, %arg3, %6, %7, %8) : (i32, i32, tensor<16x16x16x16xi32>, tensor<256xi32>, tensor<256xi32>) -> tensor<256x256xi32> | |
%10 = hal.tensor.export %9 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After InjectTensorTracingPass (iree-flow-inject-tensor-tracing) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_0::@quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%0) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> | |
%4 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_1::@quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%1) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> | |
%5 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_2::@quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%2) : (tensor<256x256xi32>) -> tensor<16x16x16x16xi32> | |
%6 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_3::@quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%3, %4, %5) : (tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>, tensor<16x16x16x16xi32>) -> %5 | |
%7 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_4::@quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%0) : (tensor<256x256xi8>) -> tensor<256xi32> | |
%8 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_5::@quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%1) : (tensor<256x256xi8>) -> tensor<256xi32> | |
%9 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_6::@quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg2, %arg3, %6, %7, %8) : (i32, i32, tensor<16x16x16x16xi32>, tensor<256xi32>, tensor<256xi32>) -> tensor<256x256xi32> | |
%10 = hal.tensor.export %9 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After CleanupTensorShapesPass (iree-flow-cleanup-tensor-shapes) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_0::@quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%0) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> | |
%4 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_1::@quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%1) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> | |
%5 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_2::@quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%2) : (tensor<256x256xi32>) -> tensor<16x16x16x16xi32> | |
%6 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_3::@quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%3, %4, %5) : (tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>, tensor<16x16x16x16xi32>) -> %5 | |
%7 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_4::@quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%0) : (tensor<256x256xi8>) -> tensor<256xi32> | |
%8 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_5::@quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%1) : (tensor<256x256xi8>) -> tensor<256xi32> | |
%9 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_6::@quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg2, %arg3, %6, %7, %8) : (i32, i32, tensor<16x16x16x16xi32>, tensor<256xi32>, tensor<256xi32>) -> tensor<256x256xi32> | |
%10 = hal.tensor.export %9 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After OutlineConstantsPass (iree-flow-outline-constants) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-amx-fp8,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,-movrs,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,+evex512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-sha512,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,+gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,-nf,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-amx-transpose,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1) -> (d0, d1)> | |
#map1 = affine_map<(d0, d1) -> (d0)> | |
#map2 = affine_map<(d0, d1) -> (d1, d0)> | |
#map3 = affine_map<(d0, d1) -> ()> | |
#map4 = affine_map<(d0, d1) -> (d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device | |
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_0 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%arg0: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg1: !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>>) { | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%1 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %1 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %arg1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
return | |
} | |
} | |
} | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_1 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%arg0: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg1: !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>>) { | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%1 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %1 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %arg1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
return | |
} | |
} | |
} | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_2 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%arg0: !flow.dispatch.tensor<readonly:tensor<256x256xi32>>, %arg1: !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>>) { | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi32>> -> tensor<256x256xi32> | |
%1 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %1 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %pack, %arg1, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>> | |
return | |
} | |
} | |
} | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_3 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%arg0: !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>>, %arg1: !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>>, %arg2: !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>>) { | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%2 = flow.dispatch.tensor.load %arg2, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%3 = linalg.mmt4d ins(%0, %1 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%2 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %3, %arg2, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> | |
return | |
} | |
} | |
} | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_4 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%arg0: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg1: !flow.dispatch.tensor<writeonly:tensor<256xi32>>) { | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%1 = tensor.empty() : tensor<256xi32> | |
%2 = linalg.fill ins(%c0_i32 : i32) outs(%1 : tensor<256xi32>) -> tensor<256xi32> | |
%3 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%2 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%4 = arith.extsi %in : i8 to i32 | |
%5 = arith.addi %4, %out : i32 | |
linalg.yield %5 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %3, %arg1, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
return | |
} | |
} | |
} | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_5 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%arg0: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg1: !flow.dispatch.tensor<writeonly:tensor<256xi32>>) { | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%1 = tensor.empty() : tensor<256xi32> | |
%2 = linalg.fill ins(%c0_i32 : i32) outs(%1 : tensor<256xi32>) -> tensor<256xi32> | |
%3 = linalg.generic {indexing_maps = [#map2, #map1], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%2 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%4 = arith.extsi %in : i8 to i32 | |
%5 = arith.addi %4, %out : i32 | |
linalg.yield %5 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %3, %arg1, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
return | |
} | |
} | |
} | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_6 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg0: i32, %arg1: i32, %arg2: !flow.dispatch.tensor<readonly:tensor<16x16x16x16xi32>>, %arg3: !flow.dispatch.tensor<readonly:tensor<256xi32>>, %arg4: !flow.dispatch.tensor<readonly:tensor<256xi32>>, %arg5: !flow.dispatch.tensor<writeonly:tensor<256x256xi32>>) { | |
%c256_i32 = arith.constant 256 : i32 | |
%0 = flow.dispatch.tensor.load %arg2, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%1 = flow.dispatch.tensor.load %arg3, offsets = [0], sizes = [256], strides = [1] : !flow.dispatch.tensor<readonly:tensor<256xi32>> -> tensor<256xi32> | |
%2 = flow.dispatch.tensor.load %arg4, offsets = [0], sizes = [256], strides = [1] : !flow.dispatch.tensor<readonly:tensor<256xi32>> -> tensor<256xi32> | |
%3 = arith.muli %arg0, %arg1 : i32 | |
%4 = tensor.empty() : tensor<256x256xi32> | |
%5 = arith.muli %3, %c256_i32 : i32 | |
%unpack = tensor.unpack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %4 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%6 = linalg.generic {indexing_maps = [#map, #map1, #map3, #map4, #map3, #map3, #map], iterator_types = ["parallel", "parallel"]} ins(%unpack, %1, %arg1, %2, %arg0, %5 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%4 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_0: i32, %in_1: i32, %in_2: i32, %in_3: i32, %in_4: i32, %out: i32): | |
%7 = arith.muli %in_0, %in_1 : i32 | |
%8 = arith.subi %in, %7 : i32 | |
%9 = arith.muli %in_2, %in_3 : i32 | |
%10 = arith.subi %8, %9 : i32 | |
%11 = arith.addi %10, %in_4 : i32 | |
linalg.yield %11 : i32 | |
} -> tensor<256x256xi32> | |
flow.dispatch.tensor.store %6, %arg5, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : tensor<256x256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256x256xi32>> | |
return | |
} | |
} | |
} | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_0::@quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%0) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> | |
%4 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_1::@quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%1) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> | |
%5 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_2::@quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%2) : (tensor<256x256xi32>) -> tensor<16x16x16x16xi32> | |
%6 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_3::@quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%3, %4, %5) : (tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>, tensor<16x16x16x16xi32>) -> %5 | |
%7 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_4::@quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%0) : (tensor<256x256xi8>) -> tensor<256xi32> | |
%8 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_5::@quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%1) : (tensor<256x256xi8>) -> tensor<256xi32> | |
%9 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_6::@quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg2, %arg3, %6, %7, %8) : (i32, i32, tensor<16x16x16x16xi32>, tensor<256xi32>, tensor<256xi32>) -> tensor<256x256xi32> | |
%10 = hal.tensor.export %9 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
} | |
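// The function-level dumps that follow (OptimizeIntArithmetic, CanonicalizerPass,
// CSE, SimplifyGlobalAccesses, ApplyPatterns) print the same entry-point IR as
// above; none of them change the dispatch graph at this stage.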
// -----// IR Dump After OptimizeIntArithmetic (iree-util-optimize-int-arithmetic) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_0::@quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%0) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> | |
%4 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_1::@quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%1) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> | |
%5 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_2::@quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%2) : (tensor<256x256xi32>) -> tensor<16x16x16x16xi32> | |
%6 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_3::@quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%3, %4, %5) : (tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>, tensor<16x16x16x16xi32>) -> %5 | |
%7 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_4::@quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%0) : (tensor<256x256xi8>) -> tensor<256xi32> | |
%8 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_5::@quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%1) : (tensor<256x256xi8>) -> tensor<256xi32> | |
%9 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_6::@quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg2, %arg3, %6, %7, %8) : (i32, i32, tensor<16x16x16x16xi32>, tensor<256xi32>, tensor<256xi32>) -> tensor<256x256xi32> | |
%10 = hal.tensor.export %9 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_0::@quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%0) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> | |
%4 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_1::@quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%1) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> | |
%5 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_2::@quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%2) : (tensor<256x256xi32>) -> tensor<16x16x16x16xi32> | |
%6 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_3::@quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%3, %4, %5) : (tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>, tensor<16x16x16x16xi32>) -> %5 | |
%7 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_4::@quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%0) : (tensor<256x256xi8>) -> tensor<256xi32> | |
%8 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_5::@quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%1) : (tensor<256x256xi8>) -> tensor<256xi32> | |
%9 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_6::@quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg2, %arg3, %6, %7, %8) : (i32, i32, tensor<16x16x16x16xi32>, tensor<256xi32>, tensor<256xi32>) -> tensor<256x256xi32> | |
%10 = hal.tensor.export %9 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_0::@quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%0) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> | |
%4 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_1::@quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%1) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> | |
%5 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_2::@quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%2) : (tensor<256x256xi32>) -> tensor<16x16x16x16xi32> | |
%6 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_3::@quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%3, %4, %5) : (tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>, tensor<16x16x16x16xi32>) -> %5 | |
%7 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_4::@quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%0) : (tensor<256x256xi8>) -> tensor<256xi32> | |
%8 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_5::@quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%1) : (tensor<256x256xi8>) -> tensor<256xi32> | |
%9 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_6::@quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg2, %arg3, %6, %7, %8) : (i32, i32, tensor<16x16x16x16xi32>, tensor<256xi32>, tensor<256xi32>) -> tensor<256x256xi32> | |
%10 = hal.tensor.export %9 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_0::@quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%0) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> | |
%4 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_1::@quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%1) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> | |
%5 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_2::@quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%2) : (tensor<256x256xi32>) -> tensor<16x16x16x16xi32> | |
%6 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_3::@quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%3, %4, %5) : (tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>, tensor<16x16x16x16xi32>) -> %5 | |
%7 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_4::@quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%0) : (tensor<256x256xi8>) -> tensor<256xi32> | |
%8 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_5::@quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%1) : (tensor<256x256xi8>) -> tensor<256xi32> | |
%9 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_6::@quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg2, %arg3, %6, %7, %8) : (i32, i32, tensor<16x16x16x16xi32>, tensor<256xi32>, tensor<256xi32>) -> tensor<256x256xi32> | |
%10 = hal.tensor.export %9 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_0::@quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%0) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> | |
%4 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_1::@quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%1) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> | |
%5 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_2::@quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%2) : (tensor<256x256xi32>) -> tensor<16x16x16x16xi32> | |
%6 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_3::@quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%3, %4, %5) : (tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>, tensor<16x16x16x16xi32>) -> %5 | |
%7 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_4::@quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%0) : (tensor<256x256xi8>) -> tensor<256xi32> | |
%8 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_5::@quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%1) : (tensor<256x256xi8>) -> tensor<256xi32> | |
%9 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_6::@quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg2, %arg3, %6, %7, %8) : (i32, i32, tensor<16x16x16x16xi32>, tensor<256xi32>, tensor<256xi32>) -> tensor<256x256xi32> | |
%10 = hal.tensor.export %9 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-amx-fp8,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,-movrs,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,+evex512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-sha512,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,+gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,-nf,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-amx-transpose,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1) -> (d0, d1)> | |
#map1 = affine_map<(d0, d1) -> (d0)> | |
#map2 = affine_map<(d0, d1) -> (d1, d0)> | |
#map3 = affine_map<(d0, d1) -> ()> | |
#map4 = affine_map<(d0, d1) -> (d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device | |
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_0 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%arg0: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg1: !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>>) { | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%1 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %1 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %arg1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
return | |
} | |
} | |
} | |
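// Reader note (not compiler output): dispatch_0 above is pure data movement. Its
// tensor.pack tiles the 256x256 LHS into (16, 2) blocks so the mmt4d kernel can read
// contiguous tiles. A minimal NumPy sketch of the same layout change, assuming a
// row-major 256x256 array named a (illustrative only):

import numpy as np

def pack_lhs(a):
    # tensor.pack outer_dims_perm=[0,1] inner_dims_pos=[0,1] inner_tiles=[16,2]:
    #   packed[i0, k0, i1, k1] == a[i0*16 + i1, k0*2 + k1]
    assert a.shape == (256, 256)
    return a.reshape(16, 16, 128, 2).transpose(0, 2, 1, 3)

rng = np.random.default_rng(0)
a = rng.integers(-128, 128, size=(256, 256), dtype=np.int8)
p = pack_lhs(a)
assert p.shape == (16, 128, 16, 2)
assert p[3, 5, 7, 1] == a[3 * 16 + 7, 5 * 2 + 1]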
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_1 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%arg0: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg1: !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>>) { | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%1 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %1 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %arg1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
return | |
} | |
} | |
} | |
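// Reader note (not compiler output): dispatch_1 packs the RHS with transposed
// outer/inner dim order (outer_dims_perm=[1,0], inner_dims_pos=[1,0]), i.e. it
// materializes the RHS transpose in (16, 2) tiles so mmt4d walks both operands
// tile-by-tile along K. A minimal NumPy sketch, with b an illustrative 256x256 array:

import numpy as np

def pack_rhs(b):
    # packed[j0, k0, j1, k1] == b[k0*2 + k1, j0*16 + j1]
    assert b.shape == (256, 256)
    return b.reshape(128, 2, 16, 16).transpose(2, 0, 3, 1)

rng = np.random.default_rng(0)
b = rng.integers(-128, 128, size=(256, 256), dtype=np.int8)
p = pack_rhs(b)
assert p.shape == (16, 128, 16, 2)
assert p[3, 5, 7, 1] == b[5 * 2 + 1, 3 * 16 + 7]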
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_2 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%arg0: !flow.dispatch.tensor<readonly:tensor<256x256xi32>>, %arg1: !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>>) { | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi32>> -> tensor<256x256xi32> | |
%1 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %1 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %pack, %arg1, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>> | |
return | |
} | |
} | |
} | |
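// Reader note (not compiler output): dispatch_2 packs the i32 accumulator into
// (16, 16) tiles, the output tile shape of the mmt4d in dispatch_3; dispatch_6 later
// inverts this with tensor.unpack. A minimal NumPy sketch (names illustrative):

import numpy as np

def pack_acc(c):
    # packed[i0, j0, i1, j1] == c[i0*16 + i1, j0*16 + j1]
    return c.reshape(16, 16, 16, 16).transpose(0, 2, 1, 3)

def unpack_acc(p):
    # Inverse layout change (what dispatch_6's tensor.unpack performs).
    return p.transpose(0, 2, 1, 3).reshape(256, 256)

rng = np.random.default_rng(0)
c = rng.integers(-2**30, 2**30, size=(256, 256), dtype=np.int32)
assert np.array_equal(unpack_acc(pack_acc(c)), c)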
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_3 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%arg0: !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>>, %arg1: !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>>, %arg2: !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>>) { | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%2 = flow.dispatch.tensor.load %arg2, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%3 = linalg.mmt4d ins(%0, %1 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%2 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %3, %arg2, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> | |
return | |
} | |
} | |
} | |
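// Reader note (not compiler output): dispatch_3 is the compute core. linalg.mmt4d
// contracts 16x2 LHS tiles against 16x2 (already transposed) RHS tiles and
// accumulates into 16x16 i32 tiles. A minimal NumPy/einsum model of its indexing
// (names illustrative; i32 overflow is not a concern here since K = 256):

import numpy as np

def mmt4d_i8i8i32(lhs4d, rhs4d, acc4d):
    # acc[i0, j0, i1, j1] += sum over k0, k1 of
    #   lhs[i0, k0, i1, k1] * rhs[j0, k0, j1, k1], widened to i32
    return acc4d + np.einsum("ikab,jkcb->ijac",
                             lhs4d.astype(np.int32),
                             rhs4d.astype(np.int32))

rng = np.random.default_rng(0)
lhs4d = rng.integers(-128, 128, size=(16, 128, 16, 2), dtype=np.int8)
rhs4d = rng.integers(-128, 128, size=(16, 128, 16, 2), dtype=np.int8)
acc4d = np.zeros((16, 16, 16, 16), dtype=np.int32)
assert mmt4d_i8i8i32(lhs4d, rhs4d, acc4d).shape == (16, 16, 16, 16)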
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_4 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%arg0: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg1: !flow.dispatch.tensor<writeonly:tensor<256xi32>>) { | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%1 = tensor.empty() : tensor<256xi32> | |
%2 = linalg.fill ins(%c0_i32 : i32) outs(%1 : tensor<256xi32>) -> tensor<256xi32> | |
%3 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%2 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%4 = arith.extsi %in : i8 to i32 | |
%5 = arith.addi %4, %out : i32 | |
linalg.yield %5 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %3, %arg1, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
return | |
} | |
} | |
} | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_5 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%arg0: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg1: !flow.dispatch.tensor<writeonly:tensor<256xi32>>) { | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%1 = tensor.empty() : tensor<256xi32> | |
%2 = linalg.fill ins(%c0_i32 : i32) outs(%1 : tensor<256xi32>) -> tensor<256xi32> | |
%3 = linalg.generic {indexing_maps = [#map2, #map1], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%2 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%4 = arith.extsi %in : i8 to i32 | |
%5 = arith.addi %4, %out : i32 | |
linalg.yield %5 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %3, %arg1, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
return | |
} | |
} | |
} | |
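// Reader note (not compiler output): dispatch_4 and dispatch_5 are the two
// 256x256 -> 256 reductions needed for the zero-point correction; they differ only
// in the input indexing map (#map vs the transposed #map2), so one produces per-row
// sums of the LHS and the other per-column sums of the RHS. A minimal NumPy sketch:

import numpy as np

def lhs_row_sums(lhs):
    # dispatch_4: identity input map, reduce over d1 -> row sums.
    return lhs.astype(np.int32).sum(axis=1)

def rhs_col_sums(rhs):
    # dispatch_5: the (d1, d0) input map transposes the read -> column sums.
    return rhs.astype(np.int32).sum(axis=0)

rng = np.random.default_rng(0)
x = rng.integers(-128, 128, size=(256, 256), dtype=np.int8)
assert lhs_row_sums(x).shape == (256,) and rhs_col_sums(x).shape == (256,)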
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_6 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg0: i32, %arg1: i32, %arg2: !flow.dispatch.tensor<readonly:tensor<16x16x16x16xi32>>, %arg3: !flow.dispatch.tensor<readonly:tensor<256xi32>>, %arg4: !flow.dispatch.tensor<readonly:tensor<256xi32>>, %arg5: !flow.dispatch.tensor<writeonly:tensor<256x256xi32>>) { | |
%c256_i32 = arith.constant 256 : i32 | |
%0 = flow.dispatch.tensor.load %arg2, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%1 = flow.dispatch.tensor.load %arg3, offsets = [0], sizes = [256], strides = [1] : !flow.dispatch.tensor<readonly:tensor<256xi32>> -> tensor<256xi32> | |
%2 = flow.dispatch.tensor.load %arg4, offsets = [0], sizes = [256], strides = [1] : !flow.dispatch.tensor<readonly:tensor<256xi32>> -> tensor<256xi32> | |
%3 = arith.muli %arg0, %arg1 : i32 | |
%4 = tensor.empty() : tensor<256x256xi32> | |
%5 = arith.muli %3, %c256_i32 : i32 | |
%unpack = tensor.unpack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %4 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%6 = linalg.generic {indexing_maps = [#map, #map1, #map3, #map4, #map3, #map3, #map], iterator_types = ["parallel", "parallel"]} ins(%unpack, %1, %arg1, %2, %arg0, %5 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%4 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_0: i32, %in_1: i32, %in_2: i32, %in_3: i32, %in_4: i32, %out: i32): | |
%7 = arith.muli %in_0, %in_1 : i32 | |
%8 = arith.subi %in, %7 : i32 | |
%9 = arith.muli %in_2, %in_3 : i32 | |
%10 = arith.subi %8, %9 : i32 | |
%11 = arith.addi %10, %in_4 : i32 | |
linalg.yield %11 : i32 | |
} -> tensor<256x256xi32> | |
flow.dispatch.tensor.store %6, %arg5, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : tensor<256x256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256x256xi32>> | |
return | |
} | |
} | |
} | |
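// Reader note (not compiler output): dispatch_6 undoes the (16, 16) accumulator
// tiling and applies the zero-point correction elementwise; per element the generic
// above computes acc - rowSumLhs*zpRhs - colSumRhs*zpLhs + 256*zpLhs*zpRhs. A
// minimal NumPy sketch (names illustrative), with an end-to-end check against the
// direct quantized matmul:

import numpy as np

def unpack_and_correct(acc4d, lhs_row_sums, rhs_col_sums, zp_lhs, zp_rhs, k=256):
    # tensor.unpack inner_tiles=[16,16]: acc2d[i, j] = acc4d[i//16, j//16, i%16, j%16]
    acc2d = acc4d.transpose(0, 2, 1, 3).reshape(256, 256)
    # Elementwise body of the linalg.generic in dispatch_6.
    return (acc2d
            - lhs_row_sums[:, None] * zp_rhs
            - rhs_col_sums[None, :] * zp_lhs
            + k * zp_lhs * zp_rhs)

rng = np.random.default_rng(0)
lhs = rng.integers(-128, 128, size=(256, 256), dtype=np.int8).astype(np.int32)
rhs = rng.integers(-128, 128, size=(256, 256), dtype=np.int8).astype(np.int32)
zp_lhs, zp_rhs = 3, -5
acc4d = (lhs @ rhs).reshape(16, 16, 16, 16).transpose(0, 2, 1, 3)
ref = (lhs - zp_lhs) @ (rhs - zp_rhs)
got = unpack_and_correct(acc4d, lhs.sum(axis=1), rhs.sum(axis=0), zp_lhs, zp_rhs)
assert np.array_equal(got, ref)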
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_0::@quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%0) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> | |
%4 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_1::@quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%1) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> | |
%5 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_2::@quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%2) : (tensor<256x256xi32>) -> tensor<16x16x16x16xi32> | |
%6 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_3::@quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%3, %4, %5) : (tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>, tensor<16x16x16x16xi32>) -> %5 | |
%7 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_4::@quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%0) : (tensor<256x256xi8>) -> tensor<256xi32> | |
%8 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_5::@quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%1) : (tensor<256x256xi8>) -> tensor<256xi32> | |
%9 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_6::@quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg2, %arg3, %6, %7, %8) : (i32, i32, tensor<16x16x16x16xi32>, tensor<256xi32>, tensor<256xi32>) -> tensor<256x256xi32> | |
%10 = hal.tensor.export %9 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-amx-fp8,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,-movrs,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,+evex512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-sha512,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,+gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,-nf,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-amx-transpose,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1) -> (d0, d1)> | |
#map1 = affine_map<(d0, d1) -> (d0)> | |
#map2 = affine_map<(d0, d1) -> (d1, d0)> | |
#map3 = affine_map<(d0, d1) -> ()> | |
#map4 = affine_map<(d0, d1) -> (d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device | |
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_0 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%arg0: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg1: !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>>) { | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%1 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %1 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %arg1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
return | |
} | |
} | |
} | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_1 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%arg0: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg1: !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>>) { | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%1 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %1 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %arg1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
return | |
} | |
} | |
} | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_2 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%arg0: !flow.dispatch.tensor<readonly:tensor<256x256xi32>>, %arg1: !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>>) { | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi32>> -> tensor<256x256xi32> | |
%1 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %1 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %pack, %arg1, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>> | |
return | |
} | |
} | |
} | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_3 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%arg0: !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>>, %arg1: !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>>, %arg2: !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>>) { | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%2 = flow.dispatch.tensor.load %arg2, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%3 = linalg.mmt4d ins(%0, %1 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%2 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %3, %arg2, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> | |
return | |
} | |
} | |
} | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_4 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%arg0: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg1: !flow.dispatch.tensor<writeonly:tensor<256xi32>>) { | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%1 = tensor.empty() : tensor<256xi32> | |
%2 = linalg.fill ins(%c0_i32 : i32) outs(%1 : tensor<256xi32>) -> tensor<256xi32> | |
%3 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%2 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%4 = arith.extsi %in : i8 to i32 | |
%5 = arith.addi %4, %out : i32 | |
linalg.yield %5 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %3, %arg1, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
return | |
} | |
} | |
} | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_5 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%arg0: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg1: !flow.dispatch.tensor<writeonly:tensor<256xi32>>) { | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%1 = tensor.empty() : tensor<256xi32> | |
%2 = linalg.fill ins(%c0_i32 : i32) outs(%1 : tensor<256xi32>) -> tensor<256xi32> | |
%3 = linalg.generic {indexing_maps = [#map2, #map1], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%2 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%4 = arith.extsi %in : i8 to i32 | |
%5 = arith.addi %4, %out : i32 | |
linalg.yield %5 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %3, %arg1, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
return | |
} | |
} | |
} | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_6 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg0: i32, %arg1: i32, %arg2: !flow.dispatch.tensor<readonly:tensor<16x16x16x16xi32>>, %arg3: !flow.dispatch.tensor<readonly:tensor<256xi32>>, %arg4: !flow.dispatch.tensor<readonly:tensor<256xi32>>, %arg5: !flow.dispatch.tensor<writeonly:tensor<256x256xi32>>) { | |
%c256_i32 = arith.constant 256 : i32 | |
%0 = flow.dispatch.tensor.load %arg2, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%1 = flow.dispatch.tensor.load %arg3, offsets = [0], sizes = [256], strides = [1] : !flow.dispatch.tensor<readonly:tensor<256xi32>> -> tensor<256xi32> | |
%2 = flow.dispatch.tensor.load %arg4, offsets = [0], sizes = [256], strides = [1] : !flow.dispatch.tensor<readonly:tensor<256xi32>> -> tensor<256xi32> | |
%3 = arith.muli %arg0, %arg1 : i32 | |
%4 = tensor.empty() : tensor<256x256xi32> | |
%5 = arith.muli %3, %c256_i32 : i32 | |
%unpack = tensor.unpack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %4 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%6 = linalg.generic {indexing_maps = [#map, #map1, #map3, #map4, #map3, #map3, #map], iterator_types = ["parallel", "parallel"]} ins(%unpack, %1, %arg1, %2, %arg0, %5 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%4 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_0: i32, %in_1: i32, %in_2: i32, %in_3: i32, %in_4: i32, %out: i32): | |
%7 = arith.muli %in_0, %in_1 : i32 | |
%8 = arith.subi %in, %7 : i32 | |
%9 = arith.muli %in_2, %in_3 : i32 | |
%10 = arith.subi %8, %9 : i32 | |
%11 = arith.addi %10, %in_4 : i32 | |
linalg.yield %11 : i32 | |
} -> tensor<256x256xi32> | |
flow.dispatch.tensor.store %6, %arg5, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : tensor<256x256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256x256xi32>> | |
return | |
} | |
} | |
} | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_0::@quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%0) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> | |
%4 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_1::@quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%1) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> | |
%5 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_2::@quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%2) : (tensor<256x256xi32>) -> tensor<16x16x16x16xi32> | |
%6 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_3::@quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%3, %4, %5) : (tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>, tensor<16x16x16x16xi32>) -> %5 | |
%7 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_4::@quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%0) : (tensor<256x256xi8>) -> tensor<256xi32> | |
%8 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_5::@quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%1) : (tensor<256x256xi8>) -> tensor<256xi32> | |
%9 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_6::@quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg2, %arg3, %6, %7, %8) : (i32, i32, tensor<16x16x16x16xi32>, tensor<256xi32>, tensor<256xi32>) -> tensor<256x256xi32> | |
%10 = hal.tensor.export %9 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After IPO (iree-util-ipo) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-amx-fp8,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,-movrs,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,+evex512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-sha512,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,+gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,-nf,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-amx-transpose,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1) -> (d0, d1)> | |
#map1 = affine_map<(d0, d1) -> (d0)> | |
#map2 = affine_map<(d0, d1) -> (d1, d0)> | |
#map3 = affine_map<(d0, d1) -> ()> | |
#map4 = affine_map<(d0, d1) -> (d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device | |
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_0 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%arg0: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg1: !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>>) { | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%1 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %1 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %arg1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
return | |
} | |
} | |
} | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_1 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%arg0: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg1: !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>>) { | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%1 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %1 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %arg1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
return | |
} | |
} | |
} | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_2 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%arg0: !flow.dispatch.tensor<readonly:tensor<256x256xi32>>, %arg1: !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>>) { | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi32>> -> tensor<256x256xi32> | |
%1 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %1 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %pack, %arg1, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>> | |
return | |
} | |
} | |
} | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_3 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%arg0: !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>>, %arg1: !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>>, %arg2: !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>>) { | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%2 = flow.dispatch.tensor.load %arg2, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%3 = linalg.mmt4d ins(%0, %1 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%2 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %3, %arg2, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> | |
return | |
} | |
} | |
} | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_4 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%arg0: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg1: !flow.dispatch.tensor<writeonly:tensor<256xi32>>) { | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%1 = tensor.empty() : tensor<256xi32> | |
%2 = linalg.fill ins(%c0_i32 : i32) outs(%1 : tensor<256xi32>) -> tensor<256xi32> | |
%3 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%2 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%4 = arith.extsi %in : i8 to i32 | |
%5 = arith.addi %4, %out : i32 | |
linalg.yield %5 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %3, %arg1, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
return | |
} | |
} | |
} | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_5 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%arg0: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg1: !flow.dispatch.tensor<writeonly:tensor<256xi32>>) { | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%1 = tensor.empty() : tensor<256xi32> | |
%2 = linalg.fill ins(%c0_i32 : i32) outs(%1 : tensor<256xi32>) -> tensor<256xi32> | |
%3 = linalg.generic {indexing_maps = [#map2, #map1], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%2 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%4 = arith.extsi %in : i8 to i32 | |
%5 = arith.addi %4, %out : i32 | |
linalg.yield %5 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %3, %arg1, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
return | |
} | |
} | |
} | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_6 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg0: i32, %arg1: i32, %arg2: !flow.dispatch.tensor<readonly:tensor<16x16x16x16xi32>>, %arg3: !flow.dispatch.tensor<readonly:tensor<256xi32>>, %arg4: !flow.dispatch.tensor<readonly:tensor<256xi32>>, %arg5: !flow.dispatch.tensor<writeonly:tensor<256x256xi32>>) { | |
%c256_i32 = arith.constant 256 : i32 | |
%0 = flow.dispatch.tensor.load %arg2, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%1 = flow.dispatch.tensor.load %arg3, offsets = [0], sizes = [256], strides = [1] : !flow.dispatch.tensor<readonly:tensor<256xi32>> -> tensor<256xi32> | |
%2 = flow.dispatch.tensor.load %arg4, offsets = [0], sizes = [256], strides = [1] : !flow.dispatch.tensor<readonly:tensor<256xi32>> -> tensor<256xi32> | |
%3 = arith.muli %arg0, %arg1 : i32 | |
%4 = tensor.empty() : tensor<256x256xi32> | |
%5 = arith.muli %3, %c256_i32 : i32 | |
%unpack = tensor.unpack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %4 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%6 = linalg.generic {indexing_maps = [#map, #map1, #map3, #map4, #map3, #map3, #map], iterator_types = ["parallel", "parallel"]} ins(%unpack, %1, %arg1, %2, %arg0, %5 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%4 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_0: i32, %in_1: i32, %in_2: i32, %in_3: i32, %in_4: i32, %out: i32): | |
%7 = arith.muli %in_0, %in_1 : i32 | |
%8 = arith.subi %in, %7 : i32 | |
%9 = arith.muli %in_2, %in_3 : i32 | |
%10 = arith.subi %8, %9 : i32 | |
%11 = arith.addi %10, %in_4 : i32 | |
linalg.yield %11 : i32 | |
} -> tensor<256x256xi32> | |
flow.dispatch.tensor.store %6, %arg5, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : tensor<256x256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256x256xi32>> | |
return | |
} | |
} | |
} | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_0::@quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%0) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> | |
%4 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_1::@quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%1) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> | |
%5 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_2::@quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%2) : (tensor<256x256xi32>) -> tensor<16x16x16x16xi32> | |
%6 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_3::@quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%3, %4, %5) : (tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>, tensor<16x16x16x16xi32>) -> %5 | |
%7 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_4::@quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%0) : (tensor<256x256xi8>) -> tensor<256xi32> | |
%8 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_5::@quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%1) : (tensor<256x256xi8>) -> tensor<256xi32> | |
%9 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_6::@quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg2, %arg3, %6, %7, %8) : (i32, i32, tensor<16x16x16x16xi32>, tensor<256xi32>, tensor<256xi32>) -> tensor<256x256xi32> | |
%10 = hal.tensor.export %9 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After FixedPointIterator (iree-util-fixed-point-iterator) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-amx-fp8,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,-movrs,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,+evex512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-sha512,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,+gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,-nf,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-amx-transpose,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1) -> (d0, d1)> | |
#map1 = affine_map<(d0, d1) -> (d0)> | |
#map2 = affine_map<(d0, d1) -> (d1, d0)> | |
#map3 = affine_map<(d0, d1) -> ()> | |
#map4 = affine_map<(d0, d1) -> (d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_0 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%arg0: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg1: !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>>) { | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%1 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %1 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %arg1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
return | |
} | |
} | |
} | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_1 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%arg0: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg1: !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>>) { | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%1 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %1 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %arg1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
return | |
} | |
} | |
} | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_2 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%arg0: !flow.dispatch.tensor<readonly:tensor<256x256xi32>>, %arg1: !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>>) { | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi32>> -> tensor<256x256xi32> | |
%1 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %1 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %pack, %arg1, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>> | |
return | |
} | |
} | |
} | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_3 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%arg0: !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>>, %arg1: !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>>, %arg2: !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>>) { | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%2 = flow.dispatch.tensor.load %arg2, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%3 = linalg.mmt4d ins(%0, %1 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%2 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %3, %arg2, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> | |
return | |
} | |
} | |
} | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_4 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%arg0: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg1: !flow.dispatch.tensor<writeonly:tensor<256xi32>>) { | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%1 = tensor.empty() : tensor<256xi32> | |
%2 = linalg.fill ins(%c0_i32 : i32) outs(%1 : tensor<256xi32>) -> tensor<256xi32> | |
%3 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%2 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%4 = arith.extsi %in : i8 to i32 | |
%5 = arith.addi %4, %out : i32 | |
linalg.yield %5 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %3, %arg1, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
return | |
} | |
} | |
} | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_5 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%arg0: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg1: !flow.dispatch.tensor<writeonly:tensor<256xi32>>) { | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%1 = tensor.empty() : tensor<256xi32> | |
%2 = linalg.fill ins(%c0_i32 : i32) outs(%1 : tensor<256xi32>) -> tensor<256xi32> | |
%3 = linalg.generic {indexing_maps = [#map2, #map1], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%2 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%4 = arith.extsi %in : i8 to i32 | |
%5 = arith.addi %4, %out : i32 | |
linalg.yield %5 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %3, %arg1, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
return | |
} | |
} | |
} | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_6 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg0: i32, %arg1: i32, %arg2: !flow.dispatch.tensor<readonly:tensor<16x16x16x16xi32>>, %arg3: !flow.dispatch.tensor<readonly:tensor<256xi32>>, %arg4: !flow.dispatch.tensor<readonly:tensor<256xi32>>, %arg5: !flow.dispatch.tensor<writeonly:tensor<256x256xi32>>) { | |
%c256_i32 = arith.constant 256 : i32 | |
%0 = flow.dispatch.tensor.load %arg2, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%1 = flow.dispatch.tensor.load %arg3, offsets = [0], sizes = [256], strides = [1] : !flow.dispatch.tensor<readonly:tensor<256xi32>> -> tensor<256xi32> | |
%2 = flow.dispatch.tensor.load %arg4, offsets = [0], sizes = [256], strides = [1] : !flow.dispatch.tensor<readonly:tensor<256xi32>> -> tensor<256xi32> | |
%3 = arith.muli %arg0, %arg1 : i32 | |
%4 = tensor.empty() : tensor<256x256xi32> | |
%5 = arith.muli %3, %c256_i32 : i32 | |
%unpack = tensor.unpack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %4 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%6 = linalg.generic {indexing_maps = [#map, #map1, #map3, #map4, #map3, #map3, #map], iterator_types = ["parallel", "parallel"]} ins(%unpack, %1, %arg1, %2, %arg0, %5 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%4 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_0: i32, %in_1: i32, %in_2: i32, %in_3: i32, %in_4: i32, %out: i32): | |
%7 = arith.muli %in_0, %in_1 : i32 | |
%8 = arith.subi %in, %7 : i32 | |
%9 = arith.muli %in_2, %in_3 : i32 | |
%10 = arith.subi %8, %9 : i32 | |
%11 = arith.addi %10, %in_4 : i32 | |
linalg.yield %11 : i32 | |
} -> tensor<256x256xi32> | |
flow.dispatch.tensor.store %6, %arg5, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : tensor<256x256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256x256xi32>> | |
return | |
} | |
} | |
} | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_0::@quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%0) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> | |
%4 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_1::@quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%1) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> | |
%5 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_2::@quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%2) : (tensor<256x256xi32>) -> tensor<16x16x16x16xi32> | |
%6 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_3::@quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%3, %4, %5) : (tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>, tensor<16x16x16x16xi32>) -> %5 | |
%7 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_4::@quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%0) : (tensor<256x256xi8>) -> tensor<256xi32> | |
%8 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_5::@quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%1) : (tensor<256x256xi8>) -> tensor<256xi32> | |
%9 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_6::@quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg2, %arg3, %6, %7, %8) : (i32, i32, tensor<16x16x16x16xi32>, tensor<256xi32>, tensor<256xi32>) -> tensor<256x256xi32> | |
%10 = hal.tensor.export %9 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After SymbolDCE (symbol-dce) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-amx-fp8,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,-movrs,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,+evex512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-sha512,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,+gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,-nf,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-amx-transpose,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1) -> (d0, d1)> | |
#map1 = affine_map<(d0, d1) -> (d0)> | |
#map2 = affine_map<(d0, d1) -> (d1, d0)> | |
#map3 = affine_map<(d0, d1) -> ()> | |
#map4 = affine_map<(d0, d1) -> (d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_0 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%arg0: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg1: !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>>) { | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%1 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %1 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %arg1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
return | |
} | |
} | |
} | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_1 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%arg0: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg1: !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>>) { | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%1 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %1 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %arg1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
return | |
} | |
} | |
} | |
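  // Packing layouts: dispatch_0 tiles the LHS (M = 256, K = 256) as [M1, K1, M0, K0] =
  // [16, 128, 16, 2], while dispatch_1 uses outer_dims_perm/inner_dims_pos = [1, 0] to
  // transpose the RHS (K = 256, N = 256) into the [N1, K1, N0, K0] = [16, 128, 16, 2]
  // layout expected by linalg.mmt4d. The M0 = N0 = 16, K0 = 2 tile sizes are the
  // data-tiling choice made for this AVX-512 (znver4) target.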
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_2 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%arg0: !flow.dispatch.tensor<readonly:tensor<256x256xi32>>, %arg1: !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>>) { | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi32>> -> tensor<256x256xi32> | |
%1 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %1 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %pack, %arg1, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>> | |
return | |
} | |
} | |
} | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_3 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%arg0: !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>>, %arg1: !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>>, %arg2: !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>>) { | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%2 = flow.dispatch.tensor.load %arg2, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%3 = linalg.mmt4d ins(%0, %1 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%2 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %3, %arg2, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> | |
return | |
} | |
} | |
} | |
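  // dispatch_3: linalg.mmt4d accumulates over the shared K1/K0 tile dimensions,
  //   acc[m1, n1, m0, n0] += sum_{k1, k0} ext(lhs[m1, k1, m0, k0]) * ext(rhs[n1, k1, n0, k0])
  // i.e. a 16x16 grid of 16x16 i32 accumulator tiles updated from 128 K-tiles of depth 2,
  // with the i8 operands extended to i32 before multiplication.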
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_4 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%arg0: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg1: !flow.dispatch.tensor<writeonly:tensor<256xi32>>) { | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%1 = tensor.empty() : tensor<256xi32> | |
%2 = linalg.fill ins(%c0_i32 : i32) outs(%1 : tensor<256xi32>) -> tensor<256xi32> | |
%3 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%2 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%4 = arith.extsi %in : i8 to i32 | |
%5 = arith.addi %4, %out : i32 | |
linalg.yield %5 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %3, %arg1, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
return | |
} | |
} | |
} | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_5 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%arg0: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg1: !flow.dispatch.tensor<writeonly:tensor<256xi32>>) { | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%1 = tensor.empty() : tensor<256xi32> | |
%2 = linalg.fill ins(%c0_i32 : i32) outs(%1 : tensor<256xi32>) -> tensor<256xi32> | |
%3 = linalg.generic {indexing_maps = [#map2, #map1], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%2 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%4 = arith.extsi %in : i8 to i32 | |
%5 = arith.addi %4, %out : i32 | |
linalg.yield %5 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %3, %arg1, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
return | |
} | |
} | |
} | |
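  // dispatch_4 reduces the LHS over its second dimension (input map (d0, d1) -> (d0, d1),
  // output map (d0)), producing per-row sums; dispatch_5 reads the RHS through the transposed
  // map (d0, d1) -> (d1, d0), producing per-column sums. Both sign-extend i8 to i32 with
  // arith.extsi before accumulating. These sums feed the zero-point correction in dispatch_6.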
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_6 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg0: i32, %arg1: i32, %arg2: !flow.dispatch.tensor<readonly:tensor<16x16x16x16xi32>>, %arg3: !flow.dispatch.tensor<readonly:tensor<256xi32>>, %arg4: !flow.dispatch.tensor<readonly:tensor<256xi32>>, %arg5: !flow.dispatch.tensor<writeonly:tensor<256x256xi32>>) { | |
%c256_i32 = arith.constant 256 : i32 | |
%0 = flow.dispatch.tensor.load %arg2, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%1 = flow.dispatch.tensor.load %arg3, offsets = [0], sizes = [256], strides = [1] : !flow.dispatch.tensor<readonly:tensor<256xi32>> -> tensor<256xi32> | |
%2 = flow.dispatch.tensor.load %arg4, offsets = [0], sizes = [256], strides = [1] : !flow.dispatch.tensor<readonly:tensor<256xi32>> -> tensor<256xi32> | |
%3 = arith.muli %arg0, %arg1 : i32 | |
%4 = tensor.empty() : tensor<256x256xi32> | |
%5 = arith.muli %3, %c256_i32 : i32 | |
%unpack = tensor.unpack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %4 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%6 = linalg.generic {indexing_maps = [#map, #map1, #map3, #map4, #map3, #map3, #map], iterator_types = ["parallel", "parallel"]} ins(%unpack, %1, %arg1, %2, %arg0, %5 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%4 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_0: i32, %in_1: i32, %in_2: i32, %in_3: i32, %in_4: i32, %out: i32): | |
%7 = arith.muli %in_0, %in_1 : i32 | |
%8 = arith.subi %in, %7 : i32 | |
%9 = arith.muli %in_2, %in_3 : i32 | |
%10 = arith.subi %8, %9 : i32 | |
%11 = arith.addi %10, %in_4 : i32 | |
linalg.yield %11 : i32 | |
} -> tensor<256x256xi32> | |
flow.dispatch.tensor.store %6, %arg5, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : tensor<256x256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256x256xi32>> | |
return | |
} | |
} | |
} | |
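  // dispatch_6 unpacks the accumulator tiles back to 256x256 and applies the standard
  // zero-point correction for a quantized matmul. With zp_lhs = %input2, zp_rhs = %input3,
  // K = 256, acc the mmt4d result (which already includes the initial %input4 values),
  // rowsum_lhs from dispatch_4 and colsum_rhs from dispatch_5, the linalg.generic computes
  //   out[i, j] = acc[i, j] - rowsum_lhs[i] * zp_rhs - colsum_rhs[j] * zp_lhs + K * zp_lhs * zp_rhs
  // which is the expansion of sum_k (lhs[i, k] - zp_lhs) * (rhs[k, j] - zp_rhs) added to %input4.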
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_0::@quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%0) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> | |
%4 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_1::@quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%1) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> | |
%5 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_2::@quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%2) : (tensor<256x256xi32>) -> tensor<16x16x16x16xi32> | |
%6 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_3::@quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%3, %4, %5) : (tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>, tensor<16x16x16x16xi32>) -> %5 | |
%7 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_4::@quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%0) : (tensor<256x256xi8>) -> tensor<256xi32> | |
%8 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_5::@quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%1) : (tensor<256x256xi8>) -> tensor<256xi32> | |
%9 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_6::@quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg2, %arg3, %6, %7, %8) : (i32, i32, tensor<16x16x16x16xi32>, tensor<256xi32>, tensor<256xi32>) -> tensor<256x256xi32> | |
%10 = hal.tensor.export %9 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After VerifyInputPass (iree-stream-verify-input) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-amx-fp8,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,-movrs,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,+evex512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-sha512,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,+gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,-nf,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-amx-transpose,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1) -> (d0, d1)> | |
#map1 = affine_map<(d0, d1) -> (d0)> | |
#map2 = affine_map<(d0, d1) -> (d1, d0)> | |
#map3 = affine_map<(d0, d1) -> ()> | |
#map4 = affine_map<(d0, d1) -> (d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_0 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%arg0: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg1: !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>>) { | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%1 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %1 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %arg1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
return | |
} | |
} | |
} | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_1 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%arg0: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg1: !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>>) { | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%1 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %1 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %arg1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
return | |
} | |
} | |
} | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_2 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%arg0: !flow.dispatch.tensor<readonly:tensor<256x256xi32>>, %arg1: !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>>) { | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi32>> -> tensor<256x256xi32> | |
%1 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %1 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %pack, %arg1, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>> | |
return | |
} | |
} | |
} | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_3 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%arg0: !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>>, %arg1: !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>>, %arg2: !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>>) { | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%2 = flow.dispatch.tensor.load %arg2, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%3 = linalg.mmt4d ins(%0, %1 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%2 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %3, %arg2, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> | |
return | |
} | |
} | |
} | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_4 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%arg0: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg1: !flow.dispatch.tensor<writeonly:tensor<256xi32>>) { | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%1 = tensor.empty() : tensor<256xi32> | |
%2 = linalg.fill ins(%c0_i32 : i32) outs(%1 : tensor<256xi32>) -> tensor<256xi32> | |
%3 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%2 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%4 = arith.extsi %in : i8 to i32 | |
%5 = arith.addi %4, %out : i32 | |
linalg.yield %5 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %3, %arg1, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
return | |
} | |
} | |
} | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_5 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%arg0: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg1: !flow.dispatch.tensor<writeonly:tensor<256xi32>>) { | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%1 = tensor.empty() : tensor<256xi32> | |
%2 = linalg.fill ins(%c0_i32 : i32) outs(%1 : tensor<256xi32>) -> tensor<256xi32> | |
%3 = linalg.generic {indexing_maps = [#map2, #map1], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%2 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%4 = arith.extsi %in : i8 to i32 | |
%5 = arith.addi %4, %out : i32 | |
linalg.yield %5 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %3, %arg1, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
return | |
} | |
} | |
} | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_6 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg0: i32, %arg1: i32, %arg2: !flow.dispatch.tensor<readonly:tensor<16x16x16x16xi32>>, %arg3: !flow.dispatch.tensor<readonly:tensor<256xi32>>, %arg4: !flow.dispatch.tensor<readonly:tensor<256xi32>>, %arg5: !flow.dispatch.tensor<writeonly:tensor<256x256xi32>>) { | |
%c256_i32 = arith.constant 256 : i32 | |
%0 = flow.dispatch.tensor.load %arg2, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%1 = flow.dispatch.tensor.load %arg3, offsets = [0], sizes = [256], strides = [1] : !flow.dispatch.tensor<readonly:tensor<256xi32>> -> tensor<256xi32> | |
%2 = flow.dispatch.tensor.load %arg4, offsets = [0], sizes = [256], strides = [1] : !flow.dispatch.tensor<readonly:tensor<256xi32>> -> tensor<256xi32> | |
%3 = arith.muli %arg0, %arg1 : i32 | |
%4 = tensor.empty() : tensor<256x256xi32> | |
%5 = arith.muli %3, %c256_i32 : i32 | |
%unpack = tensor.unpack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %4 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%6 = linalg.generic {indexing_maps = [#map, #map1, #map3, #map4, #map3, #map3, #map], iterator_types = ["parallel", "parallel"]} ins(%unpack, %1, %arg1, %2, %arg0, %5 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%4 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_0: i32, %in_1: i32, %in_2: i32, %in_3: i32, %in_4: i32, %out: i32): | |
%7 = arith.muli %in_0, %in_1 : i32 | |
%8 = arith.subi %in, %7 : i32 | |
%9 = arith.muli %in_2, %in_3 : i32 | |
%10 = arith.subi %8, %9 : i32 | |
%11 = arith.addi %10, %in_4 : i32 | |
linalg.yield %11 : i32 | |
} -> tensor<256x256xi32> | |
flow.dispatch.tensor.store %6, %arg5, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : tensor<256x256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256x256xi32>> | |
return | |
} | |
} | |
} | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_0::@quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%0) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> | |
%4 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_1::@quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%1) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> | |
%5 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_2::@quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%2) : (tensor<256x256xi32>) -> tensor<16x16x16x16xi32> | |
%6 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_3::@quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%3, %4, %5) : (tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>, tensor<16x16x16x16xi32>) -> %5 | |
%7 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_4::@quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%0) : (tensor<256x256xi8>) -> tensor<256xi32> | |
%8 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_5::@quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%1) : (tensor<256x256xi8>) -> tensor<256xi32> | |
%9 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_6::@quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg2, %arg3, %6, %7, %8) : (i32, i32, tensor<16x16x16x16xi32>, tensor<256xi32>, tensor<256xi32>) -> tensor<256x256xi32> | |
%10 = hal.tensor.export %9 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_0::@quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%0) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> | |
%4 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_1::@quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%1) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> | |
%5 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_2::@quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%2) : (tensor<256x256xi32>) -> tensor<16x16x16x16xi32> | |
%6 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_3::@quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%3, %4, %5) : (tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>, tensor<16x16x16x16xi32>) -> %5 | |
%7 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_4::@quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%0) : (tensor<256x256xi8>) -> tensor<256xi32> | |
%8 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_5::@quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%1) : (tensor<256x256xi8>) -> tensor<256xi32> | |
%9 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_6::@quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg2, %arg3, %6, %7, %8) : (i32, i32, tensor<16x16x16x16xi32>, tensor<256xi32>, tensor<256xi32>) -> tensor<256x256xi32> | |
%10 = hal.tensor.export %9 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_0::@quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%0) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> | |
%4 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_1::@quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%1) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> | |
%5 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_2::@quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%2) : (tensor<256x256xi32>) -> tensor<16x16x16x16xi32> | |
%6 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_3::@quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%3, %4, %5) : (tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>, tensor<16x16x16x16xi32>) -> %5 | |
%7 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_4::@quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%0) : (tensor<256x256xi8>) -> tensor<256xi32> | |
%8 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_5::@quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%1) : (tensor<256x256xi8>) -> tensor<256xi32> | |
%9 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_6::@quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg2, %arg3, %6, %7, %8) : (i32, i32, tensor<16x16x16x16xi32>, tensor<256xi32>, tensor<256xi32>) -> tensor<256x256xi32> | |
%10 = hal.tensor.export %9 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After OptimizeIntArithmetic (iree-util-optimize-int-arithmetic) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_0::@quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%0) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> | |
%4 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_1::@quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%1) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> | |
%5 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_2::@quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%2) : (tensor<256x256xi32>) -> tensor<16x16x16x16xi32> | |
%6 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_3::@quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%3, %4, %5) : (tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>, tensor<16x16x16x16xi32>) -> %5 | |
%7 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_4::@quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%0) : (tensor<256x256xi8>) -> tensor<256xi32> | |
%8 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_5::@quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%1) : (tensor<256x256xi8>) -> tensor<256xi32> | |
%9 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_6::@quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg2, %arg3, %6, %7, %8) : (i32, i32, tensor<16x16x16x16xi32>, tensor<256xi32>, tensor<256xi32>) -> tensor<256x256xi32> | |
%10 = hal.tensor.export %9 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_0::@quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%0) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> | |
%4 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_1::@quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%1) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> | |
%5 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_2::@quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%2) : (tensor<256x256xi32>) -> tensor<16x16x16x16xi32> | |
%6 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_3::@quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%3, %4, %5) : (tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>, tensor<16x16x16x16xi32>) -> %5 | |
%7 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_4::@quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%0) : (tensor<256x256xi8>) -> tensor<256xi32> | |
%8 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_5::@quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%1) : (tensor<256x256xi8>) -> tensor<256xi32> | |
%9 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_6::@quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg2, %arg3, %6, %7, %8) : (i32, i32, tensor<16x16x16x16xi32>, tensor<256xi32>, tensor<256xi32>) -> tensor<256x256xi32> | |
%10 = hal.tensor.export %9 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_0::@quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%0) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> | |
%4 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_1::@quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%1) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> | |
%5 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_2::@quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%2) : (tensor<256x256xi32>) -> tensor<16x16x16x16xi32> | |
%6 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_3::@quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%3, %4, %5) : (tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>, tensor<16x16x16x16xi32>) -> %5 | |
%7 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_4::@quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%0) : (tensor<256x256xi8>) -> tensor<256xi32> | |
%8 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_5::@quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%1) : (tensor<256x256xi8>) -> tensor<256xi32> | |
%9 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_6::@quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg2, %arg3, %6, %7, %8) : (i32, i32, tensor<16x16x16x16xi32>, tensor<256xi32>, tensor<256xi32>) -> tensor<256x256xi32> | |
%10 = hal.tensor.export %9 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-amx-fp8,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,-movrs,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,+evex512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-sha512,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,+gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,-nf,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-amx-transpose,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1) -> (d0, d1)> | |
#map1 = affine_map<(d0, d1) -> (d0)> | |
#map2 = affine_map<(d0, d1) -> (d1, d0)> | |
#map3 = affine_map<(d0, d1) -> ()> | |
#map4 = affine_map<(d0, d1) -> (d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_0 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%arg0: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg1: !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>>) { | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%1 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %1 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %arg1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
return | |
} | |
} | |
} | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_1 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%arg0: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg1: !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>>) { | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%1 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %1 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %arg1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
return | |
} | |
} | |
} | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_2 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%arg0: !flow.dispatch.tensor<readonly:tensor<256x256xi32>>, %arg1: !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>>) { | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi32>> -> tensor<256x256xi32> | |
%1 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %1 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %pack, %arg1, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>> | |
return | |
} | |
} | |
} | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_3 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%arg0: !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>>, %arg1: !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>>, %arg2: !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>>) { | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%2 = flow.dispatch.tensor.load %arg2, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%3 = linalg.mmt4d ins(%0, %1 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%2 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %3, %arg2, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> | |
return | |
} | |
} | |
} | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_4 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%arg0: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg1: !flow.dispatch.tensor<writeonly:tensor<256xi32>>) { | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%1 = tensor.empty() : tensor<256xi32> | |
%2 = linalg.fill ins(%c0_i32 : i32) outs(%1 : tensor<256xi32>) -> tensor<256xi32> | |
%3 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%2 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%4 = arith.extsi %in : i8 to i32 | |
%5 = arith.addi %4, %out : i32 | |
linalg.yield %5 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %3, %arg1, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
return | |
} | |
} | |
} | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_5 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%arg0: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg1: !flow.dispatch.tensor<writeonly:tensor<256xi32>>) { | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%1 = tensor.empty() : tensor<256xi32> | |
%2 = linalg.fill ins(%c0_i32 : i32) outs(%1 : tensor<256xi32>) -> tensor<256xi32> | |
%3 = linalg.generic {indexing_maps = [#map2, #map1], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%2 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%4 = arith.extsi %in : i8 to i32 | |
%5 = arith.addi %4, %out : i32 | |
linalg.yield %5 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %3, %arg1, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
return | |
} | |
} | |
} | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_6 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg0: i32, %arg1: i32, %arg2: !flow.dispatch.tensor<readonly:tensor<16x16x16x16xi32>>, %arg3: !flow.dispatch.tensor<readonly:tensor<256xi32>>, %arg4: !flow.dispatch.tensor<readonly:tensor<256xi32>>, %arg5: !flow.dispatch.tensor<writeonly:tensor<256x256xi32>>) { | |
%c256_i32 = arith.constant 256 : i32 | |
%0 = flow.dispatch.tensor.load %arg2, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%1 = flow.dispatch.tensor.load %arg3, offsets = [0], sizes = [256], strides = [1] : !flow.dispatch.tensor<readonly:tensor<256xi32>> -> tensor<256xi32> | |
%2 = flow.dispatch.tensor.load %arg4, offsets = [0], sizes = [256], strides = [1] : !flow.dispatch.tensor<readonly:tensor<256xi32>> -> tensor<256xi32> | |
%3 = arith.muli %arg0, %arg1 : i32 | |
%4 = tensor.empty() : tensor<256x256xi32> | |
%5 = arith.muli %3, %c256_i32 : i32 | |
%unpack = tensor.unpack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %4 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%6 = linalg.generic {indexing_maps = [#map, #map1, #map3, #map4, #map3, #map3, #map], iterator_types = ["parallel", "parallel"]} ins(%unpack, %1, %arg1, %2, %arg0, %5 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%4 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_0: i32, %in_1: i32, %in_2: i32, %in_3: i32, %in_4: i32, %out: i32): | |
%7 = arith.muli %in_0, %in_1 : i32 | |
%8 = arith.subi %in, %7 : i32 | |
%9 = arith.muli %in_2, %in_3 : i32 | |
%10 = arith.subi %8, %9 : i32 | |
%11 = arith.addi %10, %in_4 : i32 | |
linalg.yield %11 : i32 | |
} -> tensor<256x256xi32> | |
flow.dispatch.tensor.store %6, %arg5, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : tensor<256x256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256x256xi32>> | |
return | |
} | |
} | |
} | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_0::@quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%0) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> | |
%4 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_1::@quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%1) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> | |
%5 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_2::@quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%2) : (tensor<256x256xi32>) -> tensor<16x16x16x16xi32> | |
%6 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_3::@quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%3, %4, %5) : (tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>, tensor<16x16x16x16xi32>) -> %5 | |
%7 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_4::@quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%0) : (tensor<256x256xi8>) -> tensor<256xi32> | |
%8 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_5::@quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%1) : (tensor<256x256xi8>) -> tensor<256xi32> | |
%9 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_6::@quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg2, %arg3, %6, %7, %8) : (i32, i32, tensor<16x16x16x16xi32>, tensor<256xi32>, tensor<256xi32>) -> tensor<256x256xi32> | |
%10 = hal.tensor.export %9 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-amx-fp8,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,-movrs,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,+evex512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-sha512,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,+gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,-nf,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-amx-transpose,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1) -> (d0, d1)> | |
#map1 = affine_map<(d0, d1) -> (d0)> | |
#map2 = affine_map<(d0, d1) -> (d1, d0)> | |
#map3 = affine_map<(d0, d1) -> ()> | |
#map4 = affine_map<(d0, d1) -> (d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_0 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%arg0: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg1: !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>>) { | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%1 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %1 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %arg1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
return | |
} | |
} | |
} | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_1 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%arg0: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg1: !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>>) { | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%1 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %1 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %arg1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
return | |
} | |
} | |
} | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_2 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%arg0: !flow.dispatch.tensor<readonly:tensor<256x256xi32>>, %arg1: !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>>) { | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi32>> -> tensor<256x256xi32> | |
%1 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %1 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %pack, %arg1, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>> | |
return | |
} | |
} | |
} | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_3 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%arg0: !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>>, %arg1: !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>>, %arg2: !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>>) { | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%2 = flow.dispatch.tensor.load %arg2, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%3 = linalg.mmt4d ins(%0, %1 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%2 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %3, %arg2, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> | |
return | |
} | |
} | |
} | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_4 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%arg0: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg1: !flow.dispatch.tensor<writeonly:tensor<256xi32>>) { | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%1 = tensor.empty() : tensor<256xi32> | |
%2 = linalg.fill ins(%c0_i32 : i32) outs(%1 : tensor<256xi32>) -> tensor<256xi32> | |
%3 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%2 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%4 = arith.extsi %in : i8 to i32 | |
%5 = arith.addi %4, %out : i32 | |
linalg.yield %5 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %3, %arg1, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
return | |
} | |
} | |
} | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_5 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%arg0: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg1: !flow.dispatch.tensor<writeonly:tensor<256xi32>>) { | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%1 = tensor.empty() : tensor<256xi32> | |
%2 = linalg.fill ins(%c0_i32 : i32) outs(%1 : tensor<256xi32>) -> tensor<256xi32> | |
%3 = linalg.generic {indexing_maps = [#map2, #map1], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%2 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%4 = arith.extsi %in : i8 to i32 | |
%5 = arith.addi %4, %out : i32 | |
linalg.yield %5 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %3, %arg1, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
return | |
} | |
} | |
} | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_6 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg0: i32, %arg1: i32, %arg2: !flow.dispatch.tensor<readonly:tensor<16x16x16x16xi32>>, %arg3: !flow.dispatch.tensor<readonly:tensor<256xi32>>, %arg4: !flow.dispatch.tensor<readonly:tensor<256xi32>>, %arg5: !flow.dispatch.tensor<writeonly:tensor<256x256xi32>>) { | |
%c256_i32 = arith.constant 256 : i32 | |
%0 = flow.dispatch.tensor.load %arg2, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%1 = flow.dispatch.tensor.load %arg3, offsets = [0], sizes = [256], strides = [1] : !flow.dispatch.tensor<readonly:tensor<256xi32>> -> tensor<256xi32> | |
%2 = flow.dispatch.tensor.load %arg4, offsets = [0], sizes = [256], strides = [1] : !flow.dispatch.tensor<readonly:tensor<256xi32>> -> tensor<256xi32> | |
%3 = arith.muli %arg0, %arg1 : i32 | |
%4 = tensor.empty() : tensor<256x256xi32> | |
%5 = arith.muli %3, %c256_i32 : i32 | |
%unpack = tensor.unpack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %4 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%6 = linalg.generic {indexing_maps = [#map, #map1, #map3, #map4, #map3, #map3, #map], iterator_types = ["parallel", "parallel"]} ins(%unpack, %1, %arg1, %2, %arg0, %5 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%4 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_0: i32, %in_1: i32, %in_2: i32, %in_3: i32, %in_4: i32, %out: i32): | |
%7 = arith.muli %in_0, %in_1 : i32 | |
%8 = arith.subi %in, %7 : i32 | |
%9 = arith.muli %in_2, %in_3 : i32 | |
%10 = arith.subi %8, %9 : i32 | |
%11 = arith.addi %10, %in_4 : i32 | |
linalg.yield %11 : i32 | |
} -> tensor<256x256xi32> | |
flow.dispatch.tensor.store %6, %arg5, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : tensor<256x256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256x256xi32>> | |
return | |
} | |
} | |
} | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_0::@quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%0) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> | |
%4 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_1::@quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%1) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> | |
%5 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_2::@quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%2) : (tensor<256x256xi32>) -> tensor<16x16x16x16xi32> | |
%6 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_3::@quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%3, %4, %5) : (tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>, tensor<16x16x16x16xi32>) -> %5 | |
%7 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_4::@quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%0) : (tensor<256x256xi8>) -> tensor<256xi32> | |
%8 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_5::@quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%1) : (tensor<256x256xi8>) -> tensor<256xi32> | |
%9 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_6::@quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg2, %arg3, %6, %7, %8) : (i32, i32, tensor<16x16x16x16xi32>, tensor<256xi32>, tensor<256xi32>) -> tensor<256x256xi32> | |
%10 = hal.tensor.export %9 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After IPO (iree-util-ipo) //----- // | |
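// NOTE: reader annotation, not compiler output. The IPO dump below appears unchanged relative to the FuseGlobals dump above; with a single public function and one fused global there is nothing further for interprocedural optimization to rewrite.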
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-amx-fp8,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,-movrs,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,+evex512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-sha512,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,+gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,-nf,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-amx-transpose,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1) -> (d0, d1)> | |
#map1 = affine_map<(d0, d1) -> (d0)> | |
#map2 = affine_map<(d0, d1) -> (d1, d0)> | |
#map3 = affine_map<(d0, d1) -> ()> | |
#map4 = affine_map<(d0, d1) -> (d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_0 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%arg0: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg1: !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>>) { | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%1 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %1 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %arg1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
return | |
} | |
} | |
} | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_1 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%arg0: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg1: !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>>) { | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%1 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %0 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %1 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %arg1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
return | |
} | |
} | |
} | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_2 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%arg0: !flow.dispatch.tensor<readonly:tensor<256x256xi32>>, %arg1: !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>>) { | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi32>> -> tensor<256x256xi32> | |
%1 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack = tensor.pack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %1 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %pack, %arg1, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>> | |
return | |
} | |
} | |
} | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_3 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%arg0: !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>>, %arg1: !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>>, %arg2: !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>>) { | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%2 = flow.dispatch.tensor.load %arg2, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%3 = linalg.mmt4d ins(%0, %1 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%2 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %3, %arg2, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> | |
return | |
} | |
} | |
} | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_4 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%arg0: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg1: !flow.dispatch.tensor<writeonly:tensor<256xi32>>) { | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%1 = tensor.empty() : tensor<256xi32> | |
%2 = linalg.fill ins(%c0_i32 : i32) outs(%1 : tensor<256xi32>) -> tensor<256xi32> | |
%3 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%2 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%4 = arith.extsi %in : i8 to i32 | |
%5 = arith.addi %4, %out : i32 | |
linalg.yield %5 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %3, %arg1, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
return | |
} | |
} | |
} | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_5 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%arg0: !flow.dispatch.tensor<readonly:tensor<256x256xi8>>, %arg1: !flow.dispatch.tensor<writeonly:tensor<256xi32>>) { | |
%c0_i32 = arith.constant 0 : i32 | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%1 = tensor.empty() : tensor<256xi32> | |
%2 = linalg.fill ins(%c0_i32 : i32) outs(%1 : tensor<256xi32>) -> tensor<256xi32> | |
%3 = linalg.generic {indexing_maps = [#map2, #map1], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<256x256xi8>) outs(%2 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%4 = arith.extsi %in : i8 to i32 | |
%5 = arith.addi %4, %out : i32 | |
linalg.yield %5 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %3, %arg1, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
return | |
} | |
} | |
} | |
flow.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_6 { | |
flow.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg0: i32, %arg1: i32, %arg2: !flow.dispatch.tensor<readonly:tensor<16x16x16x16xi32>>, %arg3: !flow.dispatch.tensor<readonly:tensor<256xi32>>, %arg4: !flow.dispatch.tensor<readonly:tensor<256xi32>>, %arg5: !flow.dispatch.tensor<writeonly:tensor<256x256xi32>>) { | |
%c256_i32 = arith.constant 256 : i32 | |
%0 = flow.dispatch.tensor.load %arg2, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%1 = flow.dispatch.tensor.load %arg3, offsets = [0], sizes = [256], strides = [1] : !flow.dispatch.tensor<readonly:tensor<256xi32>> -> tensor<256xi32> | |
%2 = flow.dispatch.tensor.load %arg4, offsets = [0], sizes = [256], strides = [1] : !flow.dispatch.tensor<readonly:tensor<256xi32>> -> tensor<256xi32> | |
%3 = arith.muli %arg0, %arg1 : i32 | |
%4 = tensor.empty() : tensor<256x256xi32> | |
%5 = arith.muli %3, %c256_i32 : i32 | |
%unpack = tensor.unpack %0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %4 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%6 = linalg.generic {indexing_maps = [#map, #map1, #map3, #map4, #map3, #map3, #map], iterator_types = ["parallel", "parallel"]} ins(%unpack, %1, %arg1, %2, %arg0, %5 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%4 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_0: i32, %in_1: i32, %in_2: i32, %in_3: i32, %in_4: i32, %out: i32): | |
%7 = arith.muli %in_0, %in_1 : i32 | |
%8 = arith.subi %in, %7 : i32 | |
%9 = arith.muli %in_2, %in_3 : i32 | |
%10 = arith.subi %8, %9 : i32 | |
%11 = arith.addi %10, %in_4 : i32 | |
linalg.yield %11 : i32 | |
} -> tensor<256x256xi32> | |
flow.dispatch.tensor.store %6, %arg5, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : tensor<256x256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256x256xi32>> | |
return | |
} | |
} | |
} | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<256x256xi8> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<256x256xi8> | |
%2 = hal.tensor.import %arg4 "input4" : !hal.buffer_view -> tensor<256x256xi32> | |
%3 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_0::@quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%0) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> | |
%4 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_1::@quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%1) : (tensor<256x256xi8>) -> tensor<16x128x16x2xi8> | |
%5 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_2::@quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%2) : (tensor<256x256xi32>) -> tensor<16x16x16x16xi32> | |
%6 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_3::@quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%3, %4, %5) : (tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>, tensor<16x16x16x16xi32>) -> %5 | |
%7 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_4::@quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%0) : (tensor<256x256xi8>) -> tensor<256xi32> | |
%8 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_5::@quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%1) : (tensor<256x256xi8>) -> tensor<256xi32> | |
%9 = flow.dispatch @quantized_matmul_neither_zp_0_dynamic_dispatch_6::@quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg2, %arg3, %6, %7, %8) : (i32, i32, tensor<16x16x16x16xi32>, tensor<256xi32>, tensor<256xi32>) -> tensor<256x256xi32> | |
%10 = hal.tensor.export %9 "output0" : tensor<256x256xi32> -> !hal.buffer_view | |
util.return %10 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After ConvertToStreamPass (iree-stream-conversion) //----- // | |
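// NOTE: reader annotation, not compiler output. In the dump below the flow-level program has been rewritten into the stream dialect: flow.executable/flow.dispatch become stream.executable/stream.async.dispatch, dispatch bodies now take !stream.binding arguments resolved through stream.binding.subspan, and the ABI boundary uses stream.tensor.import / stream.tensor.export plus stream.async.transfer, with resource sizes materialized explicitly via stream.tensor.sizeof.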
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-amx-fp8,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,-movrs,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,+evex512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-sha512,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,+gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,-nf,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-amx-transpose,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1) -> (d0, d1)> | |
#map1 = affine_map<(d0, d1) -> (d0)> | |
#map2 = affine_map<(d0, d1) -> (d1, d0)> | |
#map3 = affine_map<(d0, d1) -> ()> | |
#map4 = affine_map<(d0, d1) -> (d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_0 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%arg0: !stream.binding, %arg1: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256x256xi8>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
%2 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
return | |
} | |
} | |
} | |
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_1 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%arg0: !stream.binding, %arg1: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256x256xi8>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
%2 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %2 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
return | |
} | |
} | |
} | |
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_2 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%arg0: !stream.binding, %arg1: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256x256xi32>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>> | |
%2 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi32>> -> tensor<256x256xi32> | |
%3 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %3 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %pack, %1, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>> | |
return | |
} | |
} | |
} | |
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_3 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%5 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%6 = linalg.mmt4d ins(%3, %4 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%5 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %6, %2, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> | |
return | |
} | |
} | |
} | |
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_4 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%arg0: !stream.binding, %arg1: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256x256xi8>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
%c0_i32 = arith.constant 0 : i32 | |
%2 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%3 = tensor.empty() : tensor<256xi32> | |
%4 = linalg.fill ins(%c0_i32 : i32) outs(%3 : tensor<256xi32>) -> tensor<256xi32> | |
%5 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "reduction"]} ins(%2 : tensor<256x256xi8>) outs(%4 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%6 = arith.extsi %in : i8 to i32 | |
%7 = arith.addi %6, %out : i32 | |
linalg.yield %7 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %5, %1, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
return | |
} | |
} | |
} | |
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_5 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%arg0: !stream.binding, %arg1: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256x256xi8>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
%c0_i32 = arith.constant 0 : i32 | |
%2 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%3 = tensor.empty() : tensor<256xi32> | |
%4 = linalg.fill ins(%c0_i32 : i32) outs(%3 : tensor<256xi32>) -> tensor<256xi32> | |
%5 = linalg.generic {indexing_maps = [#map2, #map1], iterator_types = ["parallel", "reduction"]} ins(%2 : tensor<256x256xi8>) outs(%4 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%6 = arith.extsi %in : i8 to i32 | |
%7 = arith.addi %6, %out : i32 | |
linalg.yield %7 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %5, %1, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
return | |
} | |
} | |
} | |
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_6 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg0: i32, %arg1: i32, %arg2: !stream.binding, %arg3: !stream.binding, %arg4: !stream.binding, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x16x16x16xi32>> | |
%1 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256xi32>> | |
%2 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256xi32>> | |
%3 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<256x256xi32>> | |
%c256_i32 = arith.constant 256 : i32 | |
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%5 = flow.dispatch.tensor.load %1, offsets = [0], sizes = [256], strides = [1] : !flow.dispatch.tensor<readonly:tensor<256xi32>> -> tensor<256xi32> | |
%6 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [256], strides = [1] : !flow.dispatch.tensor<readonly:tensor<256xi32>> -> tensor<256xi32> | |
%7 = arith.muli %arg0, %arg1 : i32 | |
%8 = tensor.empty() : tensor<256x256xi32> | |
%9 = arith.muli %7, %c256_i32 : i32 | |
%unpack = tensor.unpack %4 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %8 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%10 = linalg.generic {indexing_maps = [#map, #map1, #map3, #map4, #map3, #map3, #map], iterator_types = ["parallel", "parallel"]} ins(%unpack, %5, %arg1, %6, %arg0, %9 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%8 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_0: i32, %in_1: i32, %in_2: i32, %in_3: i32, %in_4: i32, %out: i32): | |
%11 = arith.muli %in_0, %in_1 : i32 | |
%12 = arith.subi %in, %11 : i32 | |
%13 = arith.muli %in_2, %in_3 : i32 | |
%14 = arith.subi %12, %13 : i32 | |
%15 = arith.addi %14, %in_4 : i32 | |
linalg.yield %15 : i32 | |
} -> tensor<256x256xi32> | |
flow.dispatch.tensor.store %10, %3, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : tensor<256x256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256x256xi32>> | |
return | |
} | |
} | |
} | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%element_type_i8 = hal.element_type<i8> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
%c256 = arith.constant 256 : index | |
%c256_0 = arith.constant 256 : index | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c256, %c256_0]) type(%element_type_i8) encoding(%dense_row_major) | |
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<256x256xi8> : index | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<256x256xi8> in !stream.resource<external>{%0} | |
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0} | |
%element_type_i8_1 = hal.element_type<i8> : i32 | |
%dense_row_major_2 = hal.encoding_type<dense_row_major> : i32 | |
%c256_3 = arith.constant 256 : index | |
%c256_4 = arith.constant 256 : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c256_3, %c256_4]) type(%element_type_i8_1) encoding(%dense_row_major_2) | |
%3 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<256x256xi8> : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<256x256xi8> in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3} | |
%element_type_i32 = hal.element_type<i32> : i32 | |
%dense_row_major_5 = hal.encoding_type<dense_row_major> : i32 | |
%c256_6 = arith.constant 256 : index | |
%c256_7 = arith.constant 256 : index | |
hal.buffer_view.assert<%arg4 : !hal.buffer_view> message("input4") shape([%c256_6, %c256_7]) type(%element_type_i32) encoding(%dense_row_major_5) | |
%6 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<256x256xi32> : index | |
%7 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg4 : !hal.buffer_view -> tensor<256x256xi32> in !stream.resource<external>{%6} | |
%8 = stream.async.transfer %7 : !stream.resource<external>{%6} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%6} | |
%c0 = arith.constant 0 : index | |
%9 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<16x128x16x2xi8> : index | |
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_0::@quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%2[%c0 to %0 for %0]) : (!stream.resource<*>{%0}) -> !stream.resource<*>{%9} | |
%c0_8 = arith.constant 0 : index | |
%11 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<16x128x16x2xi8> : index | |
%12 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_1::@quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%5[%c0_8 to %3 for %3]) : (!stream.resource<*>{%3}) -> !stream.resource<*>{%11} | |
%c0_9 = arith.constant 0 : index | |
%13 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<16x16x16x16xi32> : index | |
%14 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_2::@quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%8[%c0_9 to %6 for %6]) : (!stream.resource<*>{%6}) -> !stream.resource<*>{%13} | |
%c0_10 = arith.constant 0 : index | |
%15 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_3::@quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%10[%c0_10 to %9 for %9], %12[%c0_10 to %11 for %11], %14[%c0_10 to %13 for %13]) : (!stream.resource<*>{%9}, !stream.resource<*>{%11}, !stream.resource<*>{%13}) -> %14{%13} | |
%c0_11 = arith.constant 0 : index | |
%16 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<256xi32> : index | |
%17 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_4::@quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%2[%c0_11 to %0 for %0]) : (!stream.resource<*>{%0}) -> !stream.resource<*>{%16} | |
%c0_12 = arith.constant 0 : index | |
%18 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<256xi32> : index | |
%19 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_5::@quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%5[%c0_12 to %3 for %3]) : (!stream.resource<*>{%3}) -> !stream.resource<*>{%18} | |
%c0_13 = arith.constant 0 : index | |
%20 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<256x256xi32> : index | |
%21 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_6::@quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg2, %arg3, %15[%c0_13 to %13 for %13], %17[%c0_13 to %16 for %16], %19[%c0_13 to %18 for %18]) : (i32, i32, !stream.resource<*>{%13}, !stream.resource<*>{%16}, !stream.resource<*>{%18}) -> !stream.resource<*>{%20} | |
%22 = stream.async.transfer %21 : !stream.resource<*>{%20} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%20} | |
%23 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %22 : tensor<256x256xi32> in !stream.resource<external>{%20} -> !hal.buffer_view | |
util.return %23 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After VerifyLoweringToTensorsPass (iree-stream-verify-lowering-to-tensors) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-amx-fp8,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,-movrs,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,+evex512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-sha512,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,+gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,-nf,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-amx-transpose,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1) -> (d0, d1)> | |
#map1 = affine_map<(d0, d1) -> (d0)> | |
#map2 = affine_map<(d0, d1) -> (d1, d0)> | |
#map3 = affine_map<(d0, d1) -> ()> | |
#map4 = affine_map<(d0, d1) -> (d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_0 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%arg0: !stream.binding, %arg1: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256x256xi8>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
%2 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
return | |
} | |
} | |
} | |
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_1 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%arg0: !stream.binding, %arg1: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256x256xi8>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
%2 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %2 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
return | |
} | |
} | |
} | |
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_2 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%arg0: !stream.binding, %arg1: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256x256xi32>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>> | |
%2 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi32>> -> tensor<256x256xi32> | |
%3 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %3 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %pack, %1, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>> | |
return | |
} | |
} | |
} | |
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_3 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%5 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%6 = linalg.mmt4d ins(%3, %4 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%5 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %6, %2, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> | |
return | |
} | |
} | |
} | |
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_4 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%arg0: !stream.binding, %arg1: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256x256xi8>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
%c0_i32 = arith.constant 0 : i32 | |
%2 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%3 = tensor.empty() : tensor<256xi32> | |
%4 = linalg.fill ins(%c0_i32 : i32) outs(%3 : tensor<256xi32>) -> tensor<256xi32> | |
%5 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "reduction"]} ins(%2 : tensor<256x256xi8>) outs(%4 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%6 = arith.extsi %in : i8 to i32 | |
%7 = arith.addi %6, %out : i32 | |
linalg.yield %7 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %5, %1, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
return | |
} | |
} | |
} | |
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_5 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%arg0: !stream.binding, %arg1: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256x256xi8>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
%c0_i32 = arith.constant 0 : i32 | |
%2 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%3 = tensor.empty() : tensor<256xi32> | |
%4 = linalg.fill ins(%c0_i32 : i32) outs(%3 : tensor<256xi32>) -> tensor<256xi32> | |
%5 = linalg.generic {indexing_maps = [#map2, #map1], iterator_types = ["parallel", "reduction"]} ins(%2 : tensor<256x256xi8>) outs(%4 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%6 = arith.extsi %in : i8 to i32 | |
%7 = arith.addi %6, %out : i32 | |
linalg.yield %7 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %5, %1, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
return | |
} | |
} | |
} | |
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_6 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg0: i32, %arg1: i32, %arg2: !stream.binding, %arg3: !stream.binding, %arg4: !stream.binding, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x16x16x16xi32>> | |
%1 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256xi32>> | |
%2 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256xi32>> | |
%3 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<256x256xi32>> | |
%c256_i32 = arith.constant 256 : i32 | |
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%5 = flow.dispatch.tensor.load %1, offsets = [0], sizes = [256], strides = [1] : !flow.dispatch.tensor<readonly:tensor<256xi32>> -> tensor<256xi32> | |
%6 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [256], strides = [1] : !flow.dispatch.tensor<readonly:tensor<256xi32>> -> tensor<256xi32> | |
%7 = arith.muli %arg0, %arg1 : i32 | |
%8 = tensor.empty() : tensor<256x256xi32> | |
%9 = arith.muli %7, %c256_i32 : i32 | |
%unpack = tensor.unpack %4 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %8 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%10 = linalg.generic {indexing_maps = [#map, #map1, #map3, #map4, #map3, #map3, #map], iterator_types = ["parallel", "parallel"]} ins(%unpack, %5, %arg1, %6, %arg0, %9 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%8 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_0: i32, %in_1: i32, %in_2: i32, %in_3: i32, %in_4: i32, %out: i32): | |
%11 = arith.muli %in_0, %in_1 : i32 | |
%12 = arith.subi %in, %11 : i32 | |
%13 = arith.muli %in_2, %in_3 : i32 | |
%14 = arith.subi %12, %13 : i32 | |
%15 = arith.addi %14, %in_4 : i32 | |
linalg.yield %15 : i32 | |
} -> tensor<256x256xi32> | |
flow.dispatch.tensor.store %10, %3, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : tensor<256x256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256x256xi32>> | |
return | |
} | |
} | |
} | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%element_type_i8 = hal.element_type<i8> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
%c256 = arith.constant 256 : index | |
%c256_0 = arith.constant 256 : index | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c256, %c256_0]) type(%element_type_i8) encoding(%dense_row_major) | |
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<256x256xi8> : index | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<256x256xi8> in !stream.resource<external>{%0} | |
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0} | |
%element_type_i8_1 = hal.element_type<i8> : i32 | |
%dense_row_major_2 = hal.encoding_type<dense_row_major> : i32 | |
%c256_3 = arith.constant 256 : index | |
%c256_4 = arith.constant 256 : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c256_3, %c256_4]) type(%element_type_i8_1) encoding(%dense_row_major_2) | |
%3 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<256x256xi8> : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<256x256xi8> in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3} | |
%element_type_i32 = hal.element_type<i32> : i32 | |
%dense_row_major_5 = hal.encoding_type<dense_row_major> : i32 | |
%c256_6 = arith.constant 256 : index | |
%c256_7 = arith.constant 256 : index | |
hal.buffer_view.assert<%arg4 : !hal.buffer_view> message("input4") shape([%c256_6, %c256_7]) type(%element_type_i32) encoding(%dense_row_major_5) | |
%6 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<256x256xi32> : index | |
%7 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg4 : !hal.buffer_view -> tensor<256x256xi32> in !stream.resource<external>{%6} | |
%8 = stream.async.transfer %7 : !stream.resource<external>{%6} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%6} | |
%c0 = arith.constant 0 : index | |
%9 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<16x128x16x2xi8> : index | |
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_0::@quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%2[%c0 to %0 for %0]) : (!stream.resource<*>{%0}) -> !stream.resource<*>{%9} | |
%c0_8 = arith.constant 0 : index | |
%11 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<16x128x16x2xi8> : index | |
%12 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_1::@quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%5[%c0_8 to %3 for %3]) : (!stream.resource<*>{%3}) -> !stream.resource<*>{%11} | |
%c0_9 = arith.constant 0 : index | |
%13 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<16x16x16x16xi32> : index | |
%14 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_2::@quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%8[%c0_9 to %6 for %6]) : (!stream.resource<*>{%6}) -> !stream.resource<*>{%13} | |
%c0_10 = arith.constant 0 : index | |
%15 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_3::@quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%10[%c0_10 to %9 for %9], %12[%c0_10 to %11 for %11], %14[%c0_10 to %13 for %13]) : (!stream.resource<*>{%9}, !stream.resource<*>{%11}, !stream.resource<*>{%13}) -> %14{%13} | |
%c0_11 = arith.constant 0 : index | |
%16 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<256xi32> : index | |
%17 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_4::@quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%2[%c0_11 to %0 for %0]) : (!stream.resource<*>{%0}) -> !stream.resource<*>{%16} | |
%c0_12 = arith.constant 0 : index | |
%18 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<256xi32> : index | |
%19 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_5::@quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%5[%c0_12 to %3 for %3]) : (!stream.resource<*>{%3}) -> !stream.resource<*>{%18} | |
%c0_13 = arith.constant 0 : index | |
%20 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<256x256xi32> : index | |
%21 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_6::@quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg2, %arg3, %15[%c0_13 to %13 for %13], %17[%c0_13 to %16 for %16], %19[%c0_13 to %18 for %18]) : (i32, i32, !stream.resource<*>{%13}, !stream.resource<*>{%16}, !stream.resource<*>{%18}) -> !stream.resource<*>{%20} | |
%22 = stream.async.transfer %21 : !stream.resource<*>{%20} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%20} | |
%23 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %22 : tensor<256x256xi32> in !stream.resource<external>{%20} -> !hal.buffer_view | |
util.return %23 : !hal.buffer_view | |
} | |
} | |
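The module above is the stream-level form of linalg.quantized_matmul after data tiling: dispatch_0 and dispatch_1 pack the i8 LHS and RHS, dispatch_2 packs the i32 accumulator, dispatch_3 runs linalg.mmt4d on the packed tiles, dispatch_4 and dispatch_5 compute the LHS row sums and RHS column sums, and dispatch_6 unpacks the accumulator and folds in the zero-point correction. A minimal NumPy sketch of that algebra follows (illustrative only, not compiler output; names and sizes are ad hoc but match the 256x256 shapes in this module):

# Checks that the dispatch decomposition reproduces
#   out = init + (A - zp_a) @ (B - zp_b)
# via a plain i8xi8->i32 matmul plus row/column-sum corrections.
import numpy as np

rng = np.random.default_rng(0)
K = 256
A = rng.integers(-128, 128, size=(256, 256), dtype=np.int8)
B = rng.integers(-128, 128, size=(256, 256), dtype=np.int8)
init = rng.integers(-1000, 1000, size=(256, 256), dtype=np.int32)
zp_a, zp_b = np.int32(3), np.int32(-7)

# Reference semantics of linalg.quantized_matmul (accumulating into init).
ref = init + (A.astype(np.int32) - zp_a) @ (B.astype(np.int32) - zp_b)

# Decomposition mirrored from the dispatches above.
acc = init + A.astype(np.int32) @ B.astype(np.int32)    # dispatch_0..3: pack + mmt4d
row_sum_a = A.astype(np.int32).sum(axis=1)               # dispatch_4: LHS row sums
col_sum_b = B.astype(np.int32).sum(axis=0)               # dispatch_5: RHS column sums
out = acc - row_sum_a[:, None] * zp_b - col_sum_b[None, :] * zp_a + K * zp_a * zp_b  # dispatch_6

assert np.array_equal(out, ref)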
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%arg0: !stream.binding, %arg1: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256x256xi8>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
%2 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
return | |
} | |
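For intuition, the tensor.pack above tiles the 256x256 LHS into 16x2 blocks, i.e. pack[i1, k1, i0, k0] = lhs[i1*16 + i0, k1*2 + k0]; dispatch_1 below does the same to the RHS with its outer/inner dims swapped so the tile layout matches what linalg.mmt4d expects for its second operand. An equivalent NumPy view (a sketch, assuming a plain 256x256 int8 array named lhs):

import numpy as np

lhs = np.arange(256 * 256, dtype=np.int32).reshape(256, 256).astype(np.int8)
# reshape splits each axis into (outer, inner); the transpose reorders to
# [outer0, outer1, inner0, inner1], the layout produced by tensor.pack.
packed = lhs.reshape(16, 16, 128, 2).transpose(0, 2, 1, 3)
assert packed.shape == (16, 128, 16, 2)
assert packed[3, 5, 7, 1] == lhs[3 * 16 + 7, 5 * 2 + 1]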
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%arg0: !stream.binding, %arg1: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256x256xi8>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
%2 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %2 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
return | |
} | |
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%arg0: !stream.binding, %arg1: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256x256xi32>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>> | |
%2 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi32>> -> tensor<256x256xi32> | |
%3 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %3 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %pack, %1, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>> | |
return | |
} | |
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%5 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%6 = linalg.mmt4d ins(%3, %4 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%5 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %6, %2, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> | |
return | |
} | |
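linalg.mmt4d here contracts the packed tiles as acc[i1, j1, i0, j0] += lhs[i1, k1, i0, k0] * rhs[j1, k1, j0, k0], summing over k1 and k0 with the i8 inputs sign-extended into the i32 accumulator packed by dispatch_2. The 16x128x16x2 operand shapes thus cover M = N = 256 as 16 outer by 16 inner tiles and K = 256 as 128 outer by 2 inner.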
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%arg0: !stream.binding, %arg1: !stream.binding) { | |
%c0_i32 = arith.constant 0 : i32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256x256xi8>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
%2 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%3 = tensor.empty() : tensor<256xi32> | |
%4 = linalg.fill ins(%c0_i32 : i32) outs(%3 : tensor<256xi32>) -> tensor<256xi32> | |
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%2 : tensor<256x256xi8>) outs(%4 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%6 = arith.extsi %in : i8 to i32 | |
%7 = arith.addi %6, %out : i32 | |
linalg.yield %7 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %5, %1, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
return | |
} | |
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%arg0: !stream.binding, %arg1: !stream.binding) { | |
%c0_i32 = arith.constant 0 : i32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256x256xi8>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
%2 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%3 = tensor.empty() : tensor<256xi32> | |
%4 = linalg.fill ins(%c0_i32 : i32) outs(%3 : tensor<256xi32>) -> tensor<256xi32> | |
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%2 : tensor<256x256xi8>) outs(%4 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%6 = arith.extsi %in : i8 to i32 | |
%7 = arith.addi %6, %out : i32 | |
linalg.yield %7 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %5, %1, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
return | |
} | |
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg0: i32, %arg1: i32, %arg2: !stream.binding, %arg3: !stream.binding, %arg4: !stream.binding, %arg5: !stream.binding) { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x16x16x16xi32>> | |
%1 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256xi32>> | |
%2 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256xi32>> | |
%3 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<256x256xi32>> | |
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%5 = flow.dispatch.tensor.load %1, offsets = [0], sizes = [256], strides = [1] : !flow.dispatch.tensor<readonly:tensor<256xi32>> -> tensor<256xi32> | |
%6 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [256], strides = [1] : !flow.dispatch.tensor<readonly:tensor<256xi32>> -> tensor<256xi32> | |
%7 = arith.muli %arg0, %arg1 : i32 | |
%8 = tensor.empty() : tensor<256x256xi32> | |
%9 = arith.muli %7, %c256_i32 : i32 | |
%unpack = tensor.unpack %4 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %8 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%unpack, %5, %arg1, %6, %arg0, %9 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%8 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_0: i32, %in_1: i32, %in_2: i32, %in_3: i32, %in_4: i32, %out: i32): | |
%11 = arith.muli %in_0, %in_1 : i32 | |
%12 = arith.subi %in, %11 : i32 | |
%13 = arith.muli %in_2, %in_3 : i32 | |
%14 = arith.subi %12, %13 : i32 | |
%15 = arith.addi %14, %in_4 : i32 | |
linalg.yield %15 : i32 | |
} -> tensor<256x256xi32> | |
flow.dispatch.tensor.store %10, %3, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : tensor<256x256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256x256xi32>> | |
return | |
} | |
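In the elementwise body above (taking %input2/%input3 of the public function as the LHS/RHS zero points), %11 and %13 are the cross terms rowsum(lhs)[d0] * zp_rhs and colsum(rhs)[d1] * zp_lhs, and %in_4 is the constant 256 * zp_lhs * zp_rhs computed once as %9. Subtracting the two cross terms from the unpacked mmt4d accumulator and adding the constant back recovers init[d0, d1] + sum_k (lhs[d0, k] - zp_lhs) * (rhs[k, d1] - zp_rhs), i.e. the original linalg.quantized_matmul result.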
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%element_type_i8 = hal.element_type<i8> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c256, %c256]) type(%element_type_i8) encoding(%dense_row_major) | |
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<256x256xi8> : index | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<256x256xi8> in !stream.resource<external>{%0} | |
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0} | |
%element_type_i8_0 = hal.element_type<i8> : i32 | |
%dense_row_major_1 = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c256, %c256]) type(%element_type_i8_0) encoding(%dense_row_major_1) | |
%3 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<256x256xi8> : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<256x256xi8> in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3} | |
%element_type_i32 = hal.element_type<i32> : i32 | |
%dense_row_major_2 = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg4 : !hal.buffer_view> message("input4") shape([%c256, %c256]) type(%element_type_i32) encoding(%dense_row_major_2) | |
%6 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<256x256xi32> : index | |
%7 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg4 : !hal.buffer_view -> tensor<256x256xi32> in !stream.resource<external>{%6} | |
%8 = stream.async.transfer %7 : !stream.resource<external>{%6} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%6} | |
%9 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<16x128x16x2xi8> : index | |
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_0::@quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%2[%c0 to %0 for %0]) : (!stream.resource<*>{%0}) -> !stream.resource<*>{%9} | |
%11 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<16x128x16x2xi8> : index | |
%12 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_1::@quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%5[%c0 to %3 for %3]) : (!stream.resource<*>{%3}) -> !stream.resource<*>{%11} | |
%13 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<16x16x16x16xi32> : index | |
%14 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_2::@quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%8[%c0 to %6 for %6]) : (!stream.resource<*>{%6}) -> !stream.resource<*>{%13} | |
%15 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_3::@quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%10[%c0 to %9 for %9], %12[%c0 to %11 for %11], %14[%c0 to %13 for %13]) : (!stream.resource<*>{%9}, !stream.resource<*>{%11}, !stream.resource<*>{%13}) -> %14{%13} | |
%16 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<256xi32> : index | |
%17 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_4::@quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%2[%c0 to %0 for %0]) : (!stream.resource<*>{%0}) -> !stream.resource<*>{%16} | |
%18 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<256xi32> : index | |
%19 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_5::@quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%5[%c0 to %3 for %3]) : (!stream.resource<*>{%3}) -> !stream.resource<*>{%18} | |
%20 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<256x256xi32> : index | |
%21 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_6::@quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg2, %arg3, %15[%c0 to %13 for %13], %17[%c0 to %16 for %16], %19[%c0 to %18 for %18]) : (i32, i32, !stream.resource<*>{%13}, !stream.resource<*>{%16}, !stream.resource<*>{%18}) -> !stream.resource<*>{%20} | |
%22 = stream.async.transfer %21 : !stream.resource<*>{%20} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%20} | |
%23 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %22 : tensor<256x256xi32> in !stream.resource<external>{%20} -> !hal.buffer_view | |
util.return %23 : !hal.buffer_view | |
} | |
// -----// IR Dump After Inliner (inline) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-amx-fp8,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,-movrs,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,+evex512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-sha512,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,+gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,-nf,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-amx-transpose,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1) -> (d0, d1)> | |
#map1 = affine_map<(d0, d1) -> (d0)> | |
#map2 = affine_map<(d0, d1) -> (d1, d0)> | |
#map3 = affine_map<(d0, d1) -> ()> | |
#map4 = affine_map<(d0, d1) -> (d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_0 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%arg0: !stream.binding, %arg1: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256x256xi8>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
%2 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
return | |
} | |
} | |
} | |
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_1 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%arg0: !stream.binding, %arg1: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256x256xi8>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
%2 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %2 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
return | |
} | |
} | |
} | |
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_2 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%arg0: !stream.binding, %arg1: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256x256xi32>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>> | |
%2 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi32>> -> tensor<256x256xi32> | |
%3 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %3 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %pack, %1, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>> | |
return | |
} | |
} | |
} | |
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_3 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%5 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%6 = linalg.mmt4d ins(%3, %4 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%5 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %6, %2, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> | |
return | |
} | |
} | |
} | |
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_4 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%arg0: !stream.binding, %arg1: !stream.binding) { | |
%c0_i32 = arith.constant 0 : i32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256x256xi8>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
%2 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%3 = tensor.empty() : tensor<256xi32> | |
%4 = linalg.fill ins(%c0_i32 : i32) outs(%3 : tensor<256xi32>) -> tensor<256xi32> | |
%5 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "reduction"]} ins(%2 : tensor<256x256xi8>) outs(%4 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%6 = arith.extsi %in : i8 to i32 | |
%7 = arith.addi %6, %out : i32 | |
linalg.yield %7 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %5, %1, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
return | |
} | |
} | |
} | |
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_5 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%arg0: !stream.binding, %arg1: !stream.binding) { | |
%c0_i32 = arith.constant 0 : i32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256x256xi8>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
%2 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%3 = tensor.empty() : tensor<256xi32> | |
%4 = linalg.fill ins(%c0_i32 : i32) outs(%3 : tensor<256xi32>) -> tensor<256xi32> | |
%5 = linalg.generic {indexing_maps = [#map2, #map1], iterator_types = ["parallel", "reduction"]} ins(%2 : tensor<256x256xi8>) outs(%4 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%6 = arith.extsi %in : i8 to i32 | |
%7 = arith.addi %6, %out : i32 | |
linalg.yield %7 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %5, %1, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
return | |
} | |
} | |
} | |
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_6 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg0: i32, %arg1: i32, %arg2: !stream.binding, %arg3: !stream.binding, %arg4: !stream.binding, %arg5: !stream.binding) { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x16x16x16xi32>> | |
%1 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256xi32>> | |
%2 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256xi32>> | |
%3 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<256x256xi32>> | |
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%5 = flow.dispatch.tensor.load %1, offsets = [0], sizes = [256], strides = [1] : !flow.dispatch.tensor<readonly:tensor<256xi32>> -> tensor<256xi32> | |
%6 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [256], strides = [1] : !flow.dispatch.tensor<readonly:tensor<256xi32>> -> tensor<256xi32> | |
%7 = arith.muli %arg0, %arg1 : i32 | |
%8 = tensor.empty() : tensor<256x256xi32> | |
%9 = arith.muli %7, %c256_i32 : i32 | |
%unpack = tensor.unpack %4 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %8 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%10 = linalg.generic {indexing_maps = [#map, #map1, #map3, #map4, #map3, #map3, #map], iterator_types = ["parallel", "parallel"]} ins(%unpack, %5, %arg1, %6, %arg0, %9 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%8 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_0: i32, %in_1: i32, %in_2: i32, %in_3: i32, %in_4: i32, %out: i32): | |
%11 = arith.muli %in_0, %in_1 : i32 | |
%12 = arith.subi %in, %11 : i32 | |
%13 = arith.muli %in_2, %in_3 : i32 | |
%14 = arith.subi %12, %13 : i32 | |
%15 = arith.addi %14, %in_4 : i32 | |
linalg.yield %15 : i32 | |
} -> tensor<256x256xi32> | |
flow.dispatch.tensor.store %10, %3, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : tensor<256x256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256x256xi32>> | |
return | |
} | |
} | |
} | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%element_type_i8 = hal.element_type<i8> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c256, %c256]) type(%element_type_i8) encoding(%dense_row_major) | |
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<256x256xi8> : index | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<256x256xi8> in !stream.resource<external>{%0} | |
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0} | |
%element_type_i8_0 = hal.element_type<i8> : i32 | |
%dense_row_major_1 = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c256, %c256]) type(%element_type_i8_0) encoding(%dense_row_major_1) | |
%3 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<256x256xi8> : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<256x256xi8> in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3} | |
%element_type_i32 = hal.element_type<i32> : i32 | |
%dense_row_major_2 = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg4 : !hal.buffer_view> message("input4") shape([%c256, %c256]) type(%element_type_i32) encoding(%dense_row_major_2) | |
%6 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<256x256xi32> : index | |
%7 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg4 : !hal.buffer_view -> tensor<256x256xi32> in !stream.resource<external>{%6} | |
%8 = stream.async.transfer %7 : !stream.resource<external>{%6} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%6} | |
%9 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<16x128x16x2xi8> : index | |
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_0::@quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%2[%c0 to %0 for %0]) : (!stream.resource<*>{%0}) -> !stream.resource<*>{%9} | |
%11 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<16x128x16x2xi8> : index | |
%12 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_1::@quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%5[%c0 to %3 for %3]) : (!stream.resource<*>{%3}) -> !stream.resource<*>{%11} | |
%13 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<16x16x16x16xi32> : index | |
%14 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_2::@quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%8[%c0 to %6 for %6]) : (!stream.resource<*>{%6}) -> !stream.resource<*>{%13} | |
%15 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_3::@quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%10[%c0 to %9 for %9], %12[%c0 to %11 for %11], %14[%c0 to %13 for %13]) : (!stream.resource<*>{%9}, !stream.resource<*>{%11}, !stream.resource<*>{%13}) -> %14{%13} | |
%16 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<256xi32> : index | |
%17 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_4::@quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%2[%c0 to %0 for %0]) : (!stream.resource<*>{%0}) -> !stream.resource<*>{%16} | |
%18 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<256xi32> : index | |
%19 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_5::@quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%5[%c0 to %3 for %3]) : (!stream.resource<*>{%3}) -> !stream.resource<*>{%18} | |
%20 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<256x256xi32> : index | |
%21 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_6::@quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg2, %arg3, %15[%c0 to %13 for %13], %17[%c0 to %16 for %16], %19[%c0 to %18 for %18]) : (i32, i32, !stream.resource<*>{%13}, !stream.resource<*>{%16}, !stream.resource<*>{%18}) -> !stream.resource<*>{%20} | |
%22 = stream.async.transfer %21 : !stream.resource<*>{%20} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%20} | |
%23 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %22 : tensor<256x256xi32> in !stream.resource<external>{%20} -> !hal.buffer_view | |
util.return %23 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%element_type_i8 = hal.element_type<i8> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c256, %c256]) type(%element_type_i8) encoding(%dense_row_major) | |
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<256x256xi8> : index | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<256x256xi8> in !stream.resource<external>{%0} | |
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0} | |
%element_type_i8_0 = hal.element_type<i8> : i32 | |
%dense_row_major_1 = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c256, %c256]) type(%element_type_i8_0) encoding(%dense_row_major_1) | |
%3 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<256x256xi8> : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<256x256xi8> in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3} | |
%element_type_i32 = hal.element_type<i32> : i32 | |
%dense_row_major_2 = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg4 : !hal.buffer_view> message("input4") shape([%c256, %c256]) type(%element_type_i32) encoding(%dense_row_major_2) | |
%6 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<256x256xi32> : index | |
%7 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg4 : !hal.buffer_view -> tensor<256x256xi32> in !stream.resource<external>{%6} | |
%8 = stream.async.transfer %7 : !stream.resource<external>{%6} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%6} | |
%9 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<16x128x16x2xi8> : index | |
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_0::@quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%2[%c0 to %0 for %0]) : (!stream.resource<*>{%0}) -> !stream.resource<*>{%9} | |
%11 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<16x128x16x2xi8> : index | |
%12 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_1::@quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%5[%c0 to %3 for %3]) : (!stream.resource<*>{%3}) -> !stream.resource<*>{%11} | |
%13 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<16x16x16x16xi32> : index | |
%14 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_2::@quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%8[%c0 to %6 for %6]) : (!stream.resource<*>{%6}) -> !stream.resource<*>{%13} | |
%15 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_3::@quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%10[%c0 to %9 for %9], %12[%c0 to %11 for %11], %14[%c0 to %13 for %13]) : (!stream.resource<*>{%9}, !stream.resource<*>{%11}, !stream.resource<*>{%13}) -> %14{%13} | |
%16 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<256xi32> : index | |
%17 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_4::@quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%2[%c0 to %0 for %0]) : (!stream.resource<*>{%0}) -> !stream.resource<*>{%16} | |
%18 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<256xi32> : index | |
%19 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_5::@quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%5[%c0 to %3 for %3]) : (!stream.resource<*>{%3}) -> !stream.resource<*>{%18} | |
%20 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<256x256xi32> : index | |
%21 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_6::@quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg2, %arg3, %15[%c0 to %13 for %13], %17[%c0 to %16 for %16], %19[%c0 to %18 for %18]) : (i32, i32, !stream.resource<*>{%13}, !stream.resource<*>{%16}, !stream.resource<*>{%18}) -> !stream.resource<*>{%20} | |
%22 = stream.async.transfer %21 : !stream.resource<*>{%20} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%20} | |
%23 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %22 : tensor<256x256xi32> in !stream.resource<external>{%20} -> !hal.buffer_view | |
util.return %23 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%element_type_i8 = hal.element_type<i8> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c256, %c256]) type(%element_type_i8) encoding(%dense_row_major) | |
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<256x256xi8> : index | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<256x256xi8> in !stream.resource<external>{%0} | |
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c256, %c256]) type(%element_type_i8) encoding(%dense_row_major) | |
%3 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<256x256xi8> in !stream.resource<external>{%0} | |
%4 = stream.async.transfer %3 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0} | |
%element_type_i32 = hal.element_type<i32> : i32 | |
hal.buffer_view.assert<%arg4 : !hal.buffer_view> message("input4") shape([%c256, %c256]) type(%element_type_i32) encoding(%dense_row_major) | |
%5 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<256x256xi32> : index | |
%6 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg4 : !hal.buffer_view -> tensor<256x256xi32> in !stream.resource<external>{%5} | |
%7 = stream.async.transfer %6 : !stream.resource<external>{%5} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%5} | |
%8 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<16x128x16x2xi8> : index | |
%9 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_0::@quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%2[%c0 to %0 for %0]) : (!stream.resource<*>{%0}) -> !stream.resource<*>{%8} | |
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_1::@quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%4[%c0 to %0 for %0]) : (!stream.resource<*>{%0}) -> !stream.resource<*>{%8} | |
%11 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<16x16x16x16xi32> : index | |
%12 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_2::@quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%7[%c0 to %5 for %5]) : (!stream.resource<*>{%5}) -> !stream.resource<*>{%11} | |
%13 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_3::@quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%9[%c0 to %8 for %8], %10[%c0 to %8 for %8], %12[%c0 to %11 for %11]) : (!stream.resource<*>{%8}, !stream.resource<*>{%8}, !stream.resource<*>{%11}) -> %12{%11} | |
%14 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<256xi32> : index | |
%15 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_4::@quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%2[%c0 to %0 for %0]) : (!stream.resource<*>{%0}) -> !stream.resource<*>{%14} | |
%16 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_5::@quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%4[%c0 to %0 for %0]) : (!stream.resource<*>{%0}) -> !stream.resource<*>{%14} | |
%17 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_6::@quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg2, %arg3, %13[%c0 to %11 for %11], %15[%c0 to %14 for %14], %16[%c0 to %14 for %14]) : (i32, i32, !stream.resource<*>{%11}, !stream.resource<*>{%14}, !stream.resource<*>{%14}) -> !stream.resource<*>{%5} | |
%18 = stream.async.transfer %17 : !stream.resource<*>{%5} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%5} | |
%19 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %18 : tensor<256x256xi32> in !stream.resource<external>{%5} -> !hal.buffer_view | |
util.return %19 : !hal.buffer_view | |
} | |
// -----// IR Dump After OptimizeIntArithmetic (iree-util-optimize-int-arithmetic) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%element_type_i8 = hal.element_type<i8> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c256, %c256]) type(%element_type_i8) encoding(%dense_row_major) | |
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<256x256xi8> : index | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<256x256xi8> in !stream.resource<external>{%0} | |
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c256, %c256]) type(%element_type_i8) encoding(%dense_row_major) | |
%3 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<256x256xi8> in !stream.resource<external>{%0} | |
%4 = stream.async.transfer %3 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0} | |
%element_type_i32 = hal.element_type<i32> : i32 | |
hal.buffer_view.assert<%arg4 : !hal.buffer_view> message("input4") shape([%c256, %c256]) type(%element_type_i32) encoding(%dense_row_major) | |
%5 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<256x256xi32> : index | |
%6 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg4 : !hal.buffer_view -> tensor<256x256xi32> in !stream.resource<external>{%5} | |
%7 = stream.async.transfer %6 : !stream.resource<external>{%5} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%5} | |
%8 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<16x128x16x2xi8> : index | |
%9 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_0::@quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%2[%c0 to %0 for %0]) : (!stream.resource<*>{%0}) -> !stream.resource<*>{%8} | |
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_1::@quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%4[%c0 to %0 for %0]) : (!stream.resource<*>{%0}) -> !stream.resource<*>{%8} | |
%11 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<16x16x16x16xi32> : index | |
%12 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_2::@quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%7[%c0 to %5 for %5]) : (!stream.resource<*>{%5}) -> !stream.resource<*>{%11} | |
%13 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_3::@quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%9[%c0 to %8 for %8], %10[%c0 to %8 for %8], %12[%c0 to %11 for %11]) : (!stream.resource<*>{%8}, !stream.resource<*>{%8}, !stream.resource<*>{%11}) -> %12{%11} | |
%14 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<256xi32> : index | |
%15 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_4::@quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%2[%c0 to %0 for %0]) : (!stream.resource<*>{%0}) -> !stream.resource<*>{%14} | |
%16 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_5::@quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%4[%c0 to %0 for %0]) : (!stream.resource<*>{%0}) -> !stream.resource<*>{%14} | |
%17 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_6::@quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg2, %arg3, %13[%c0 to %11 for %11], %15[%c0 to %14 for %14], %16[%c0 to %14 for %14]) : (i32, i32, !stream.resource<*>{%11}, !stream.resource<*>{%14}, !stream.resource<*>{%14}) -> !stream.resource<*>{%5} | |
%18 = stream.async.transfer %17 : !stream.resource<*>{%5} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%5} | |
%19 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %18 : tensor<256x256xi32> in !stream.resource<external>{%5} -> !hal.buffer_view | |
util.return %19 : !hal.buffer_view | |
} | |
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%element_type_i8 = hal.element_type<i8> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c256, %c256]) type(%element_type_i8) encoding(%dense_row_major) | |
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<256x256xi8> : index | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<256x256xi8> in !stream.resource<external>{%0} | |
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c256, %c256]) type(%element_type_i8) encoding(%dense_row_major) | |
%3 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<256x256xi8> in !stream.resource<external>{%0} | |
%4 = stream.async.transfer %3 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0} | |
%element_type_i32 = hal.element_type<i32> : i32 | |
hal.buffer_view.assert<%arg4 : !hal.buffer_view> message("input4") shape([%c256, %c256]) type(%element_type_i32) encoding(%dense_row_major) | |
%5 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<256x256xi32> : index | |
%6 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg4 : !hal.buffer_view -> tensor<256x256xi32> in !stream.resource<external>{%5} | |
%7 = stream.async.transfer %6 : !stream.resource<external>{%5} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%5} | |
%8 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<16x128x16x2xi8> : index | |
%9 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_0::@quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%2[%c0 to %0 for %0]) : (!stream.resource<*>{%0}) -> !stream.resource<*>{%8} | |
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_1::@quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%4[%c0 to %0 for %0]) : (!stream.resource<*>{%0}) -> !stream.resource<*>{%8} | |
%11 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<16x16x16x16xi32> : index | |
%12 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_2::@quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%7[%c0 to %5 for %5]) : (!stream.resource<*>{%5}) -> !stream.resource<*>{%11} | |
%13 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_3::@quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%9[%c0 to %8 for %8], %10[%c0 to %8 for %8], %12[%c0 to %11 for %11]) : (!stream.resource<*>{%8}, !stream.resource<*>{%8}, !stream.resource<*>{%11}) -> %12{%11} | |
%14 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<256xi32> : index | |
%15 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_4::@quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%2[%c0 to %0 for %0]) : (!stream.resource<*>{%0}) -> !stream.resource<*>{%14} | |
%16 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_5::@quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%4[%c0 to %0 for %0]) : (!stream.resource<*>{%0}) -> !stream.resource<*>{%14} | |
%17 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_6::@quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg2, %arg3, %13[%c0 to %11 for %11], %15[%c0 to %14 for %14], %16[%c0 to %14 for %14]) : (i32, i32, !stream.resource<*>{%11}, !stream.resource<*>{%14}, !stream.resource<*>{%14}) -> !stream.resource<*>{%5} | |
%18 = stream.async.transfer %17 : !stream.resource<*>{%5} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%5} | |
%19 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %18 : tensor<256x256xi32> in !stream.resource<external>{%5} -> !hal.buffer_view | |
util.return %19 : !hal.buffer_view | |
} | |
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%element_type_i8 = hal.element_type<i8> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c256, %c256]) type(%element_type_i8) encoding(%dense_row_major) | |
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<256x256xi8> : index | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<256x256xi8> in !stream.resource<external>{%0} | |
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c256, %c256]) type(%element_type_i8) encoding(%dense_row_major) | |
%3 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<256x256xi8> in !stream.resource<external>{%0} | |
%4 = stream.async.transfer %3 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0} | |
%element_type_i32 = hal.element_type<i32> : i32 | |
hal.buffer_view.assert<%arg4 : !hal.buffer_view> message("input4") shape([%c256, %c256]) type(%element_type_i32) encoding(%dense_row_major) | |
%5 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<256x256xi32> : index | |
%6 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg4 : !hal.buffer_view -> tensor<256x256xi32> in !stream.resource<external>{%5} | |
%7 = stream.async.transfer %6 : !stream.resource<external>{%5} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%5} | |
%8 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<16x128x16x2xi8> : index | |
%9 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_0::@quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%2[%c0 to %0 for %0]) : (!stream.resource<*>{%0}) -> !stream.resource<*>{%8} | |
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_1::@quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%4[%c0 to %0 for %0]) : (!stream.resource<*>{%0}) -> !stream.resource<*>{%8} | |
%11 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<16x16x16x16xi32> : index | |
%12 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_2::@quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%7[%c0 to %5 for %5]) : (!stream.resource<*>{%5}) -> !stream.resource<*>{%11} | |
%13 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_3::@quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%9[%c0 to %8 for %8], %10[%c0 to %8 for %8], %12[%c0 to %11 for %11]) : (!stream.resource<*>{%8}, !stream.resource<*>{%8}, !stream.resource<*>{%11}) -> %12{%11} | |
%14 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<256xi32> : index | |
%15 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_4::@quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%2[%c0 to %0 for %0]) : (!stream.resource<*>{%0}) -> !stream.resource<*>{%14} | |
%16 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_5::@quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%4[%c0 to %0 for %0]) : (!stream.resource<*>{%0}) -> !stream.resource<*>{%14} | |
%17 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_6::@quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg2, %arg3, %13[%c0 to %11 for %11], %15[%c0 to %14 for %14], %16[%c0 to %14 for %14]) : (i32, i32, !stream.resource<*>{%11}, !stream.resource<*>{%14}, !stream.resource<*>{%14}) -> !stream.resource<*>{%5} | |
%18 = stream.async.transfer %17 : !stream.resource<*>{%5} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%5} | |
%19 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %18 : tensor<256x256xi32> in !stream.resource<external>{%5} -> !hal.buffer_view | |
util.return %19 : !hal.buffer_view | |
} | |
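// Note: the function body is byte-for-byte identical across the OptimizeIntArithmetic,
// SimplifyGlobalAccesses, and ApplyPatterns dumps above (those passes made no changes here);
// the FoldGlobals dump below switches to the full module view, so the stream.executable
// bodies for the seven dispatches become visible again.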
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-amx-fp8,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,-movrs,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,+evex512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-sha512,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,+gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,-nf,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-amx-transpose,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1) -> (d0, d1)> | |
#map1 = affine_map<(d0, d1) -> (d0)> | |
#map2 = affine_map<(d0, d1) -> (d1, d0)> | |
#map3 = affine_map<(d0, d1) -> ()> | |
#map4 = affine_map<(d0, d1) -> (d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
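  // Annotation: the original linalg.quantized_matmul has been split into seven dispatches.
  // dispatch_0/dispatch_1 pack the i8 LHS/RHS into a tiled 16x128x16x2 layout (the RHS with
  // transposed outer/inner dims), dispatch_2 packs the i32 accumulator into 16x16x16x16,
  // dispatch_3 runs the tiled linalg.mmt4d, dispatch_4/dispatch_5 compute per-row/per-column
  // i32 sums of the two i8 inputs, and dispatch_6 unpacks the accumulator and applies the
  // zero-point correction using those sums.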
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_0 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%arg0: !stream.binding, %arg1: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256x256xi8>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
%2 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
return | |
} | |
} | |
} | |
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_1 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%arg0: !stream.binding, %arg1: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256x256xi8>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
%2 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %2 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
return | |
} | |
} | |
} | |
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_2 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%arg0: !stream.binding, %arg1: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256x256xi32>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>> | |
%2 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi32>> -> tensor<256x256xi32> | |
%3 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %3 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %pack, %1, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>> | |
return | |
} | |
} | |
} | |
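  // dispatch_3 multiplies the packed tiles: linalg.mmt4d with inner tile sizes M0=16, N0=16,
  // K0=2, i.e. 16x16 i32 accumulator tiles fed by 16x2 i8 tiles from each operand. The K0=2
  // inner tile presumably targets the AVX-512 VNNI-style i8 dot-product path implied by the
  // znver4 cpu_features in the executable target above.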
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_3 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%5 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%6 = linalg.mmt4d ins(%3, %4 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%5 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %6, %2, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> | |
return | |
} | |
} | |
} | |
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_4 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%arg0: !stream.binding, %arg1: !stream.binding) { | |
%c0_i32 = arith.constant 0 : i32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256x256xi8>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
%2 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%3 = tensor.empty() : tensor<256xi32> | |
%4 = linalg.fill ins(%c0_i32 : i32) outs(%3 : tensor<256xi32>) -> tensor<256xi32> | |
%5 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "reduction"]} ins(%2 : tensor<256x256xi8>) outs(%4 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%6 = arith.extsi %in : i8 to i32 | |
%7 = arith.addi %6, %out : i32 | |
linalg.yield %7 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %5, %1, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
return | |
} | |
} | |
} | |
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_5 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%arg0: !stream.binding, %arg1: !stream.binding) { | |
%c0_i32 = arith.constant 0 : i32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256x256xi8>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
%2 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%3 = tensor.empty() : tensor<256xi32> | |
%4 = linalg.fill ins(%c0_i32 : i32) outs(%3 : tensor<256xi32>) -> tensor<256xi32> | |
%5 = linalg.generic {indexing_maps = [#map2, #map1], iterator_types = ["parallel", "reduction"]} ins(%2 : tensor<256x256xi8>) outs(%4 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%6 = arith.extsi %in : i8 to i32 | |
%7 = arith.addi %6, %out : i32 | |
linalg.yield %7 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %5, %1, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
return | |
} | |
} | |
} | |
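  // dispatch_6 unpacks the 16x16x16x16 accumulator back to 256x256 and applies the
  // zero-point correction for quantized_matmul. With %arg0 = lhs_zp and %arg1 = rhs_zp
  // (as bound at the call site), the generic below computes
  //   out[i, j] = acc[i, j] - rhs_zp * rowsum_lhs[i] - lhs_zp * colsum_rhs[j] + 256 * lhs_zp * rhs_zp
  // where rowsum_lhs / colsum_rhs are the i32 reductions produced by dispatch_4 / dispatch_5
  // and 256 is the shared reduction (K) dimension.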
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_6 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg0: i32, %arg1: i32, %arg2: !stream.binding, %arg3: !stream.binding, %arg4: !stream.binding, %arg5: !stream.binding) { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x16x16x16xi32>> | |
%1 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256xi32>> | |
%2 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256xi32>> | |
%3 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<256x256xi32>> | |
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%5 = flow.dispatch.tensor.load %1, offsets = [0], sizes = [256], strides = [1] : !flow.dispatch.tensor<readonly:tensor<256xi32>> -> tensor<256xi32> | |
%6 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [256], strides = [1] : !flow.dispatch.tensor<readonly:tensor<256xi32>> -> tensor<256xi32> | |
%7 = arith.muli %arg0, %arg1 : i32 | |
%8 = tensor.empty() : tensor<256x256xi32> | |
%9 = arith.muli %7, %c256_i32 : i32 | |
%unpack = tensor.unpack %4 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %8 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%10 = linalg.generic {indexing_maps = [#map, #map1, #map3, #map4, #map3, #map3, #map], iterator_types = ["parallel", "parallel"]} ins(%unpack, %5, %arg1, %6, %arg0, %9 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%8 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_0: i32, %in_1: i32, %in_2: i32, %in_3: i32, %in_4: i32, %out: i32): | |
%11 = arith.muli %in_0, %in_1 : i32 | |
%12 = arith.subi %in, %11 : i32 | |
%13 = arith.muli %in_2, %in_3 : i32 | |
%14 = arith.subi %12, %13 : i32 | |
%15 = arith.addi %14, %in_4 : i32 | |
linalg.yield %15 : i32 | |
} -> tensor<256x256xi32> | |
flow.dispatch.tensor.store %10, %3, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : tensor<256x256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256x256xi32>> | |
return | |
} | |
} | |
} | |
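  // Host-side entry point: asserts the 256x256 i8/i8/i32 buffer_view shapes and encodings,
  // imports the buffers as stream resources, issues the seven dispatches above, and exports
  // the corrected 256x256xi32 result as a !hal.buffer_view.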
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%element_type_i8 = hal.element_type<i8> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c256, %c256]) type(%element_type_i8) encoding(%dense_row_major) | |
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<256x256xi8> : index | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<256x256xi8> in !stream.resource<external>{%0} | |
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c256, %c256]) type(%element_type_i8) encoding(%dense_row_major) | |
%3 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<256x256xi8> in !stream.resource<external>{%0} | |
%4 = stream.async.transfer %3 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0} | |
%element_type_i32 = hal.element_type<i32> : i32 | |
hal.buffer_view.assert<%arg4 : !hal.buffer_view> message("input4") shape([%c256, %c256]) type(%element_type_i32) encoding(%dense_row_major) | |
%5 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<256x256xi32> : index | |
%6 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg4 : !hal.buffer_view -> tensor<256x256xi32> in !stream.resource<external>{%5} | |
%7 = stream.async.transfer %6 : !stream.resource<external>{%5} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%5} | |
%8 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<16x128x16x2xi8> : index | |
%9 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_0::@quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%2[%c0 to %0 for %0]) : (!stream.resource<*>{%0}) -> !stream.resource<*>{%8} | |
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_1::@quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%4[%c0 to %0 for %0]) : (!stream.resource<*>{%0}) -> !stream.resource<*>{%8} | |
%11 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<16x16x16x16xi32> : index | |
%12 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_2::@quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%7[%c0 to %5 for %5]) : (!stream.resource<*>{%5}) -> !stream.resource<*>{%11} | |
%13 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_3::@quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%9[%c0 to %8 for %8], %10[%c0 to %8 for %8], %12[%c0 to %11 for %11]) : (!stream.resource<*>{%8}, !stream.resource<*>{%8}, !stream.resource<*>{%11}) -> %12{%11} | |
%14 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<256xi32> : index | |
%15 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_4::@quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%2[%c0 to %0 for %0]) : (!stream.resource<*>{%0}) -> !stream.resource<*>{%14} | |
%16 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_5::@quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%4[%c0 to %0 for %0]) : (!stream.resource<*>{%0}) -> !stream.resource<*>{%14} | |
%17 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_6::@quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg2, %arg3, %13[%c0 to %11 for %11], %15[%c0 to %14 for %14], %16[%c0 to %14 for %14]) : (i32, i32, !stream.resource<*>{%11}, !stream.resource<*>{%14}, !stream.resource<*>{%14}) -> !stream.resource<*>{%5} | |
%18 = stream.async.transfer %17 : !stream.resource<*>{%5} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%5} | |
%19 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %18 : tensor<256x256xi32> in !stream.resource<external>{%5} -> !hal.buffer_view | |
util.return %19 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-amx-fp8,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,-movrs,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,+evex512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-sha512,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,+gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,-nf,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-amx-transpose,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1) -> (d0, d1)> | |
#map1 = affine_map<(d0, d1) -> (d0)> | |
#map2 = affine_map<(d0, d1) -> (d1, d0)> | |
#map3 = affine_map<(d0, d1) -> ()> | |
#map4 = affine_map<(d0, d1) -> (d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_0 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%arg0: !stream.binding, %arg1: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256x256xi8>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
%2 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
return | |
} | |
} | |
} | |
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_1 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%arg0: !stream.binding, %arg1: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256x256xi8>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
%2 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %2 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
return | |
} | |
} | |
} | |
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_2 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%arg0: !stream.binding, %arg1: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256x256xi32>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>> | |
%2 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi32>> -> tensor<256x256xi32> | |
%3 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %3 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %pack, %1, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>> | |
return | |
} | |
} | |
} | |
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_3 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%5 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%6 = linalg.mmt4d ins(%3, %4 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%5 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %6, %2, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> | |
return | |
} | |
} | |
} | |
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_4 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%arg0: !stream.binding, %arg1: !stream.binding) { | |
%c0_i32 = arith.constant 0 : i32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256x256xi8>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
%2 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%3 = tensor.empty() : tensor<256xi32> | |
%4 = linalg.fill ins(%c0_i32 : i32) outs(%3 : tensor<256xi32>) -> tensor<256xi32> | |
%5 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "reduction"]} ins(%2 : tensor<256x256xi8>) outs(%4 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%6 = arith.extsi %in : i8 to i32 | |
%7 = arith.addi %6, %out : i32 | |
linalg.yield %7 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %5, %1, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
return | |
} | |
} | |
} | |
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_5 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%arg0: !stream.binding, %arg1: !stream.binding) { | |
%c0_i32 = arith.constant 0 : i32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256x256xi8>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
%2 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%3 = tensor.empty() : tensor<256xi32> | |
%4 = linalg.fill ins(%c0_i32 : i32) outs(%3 : tensor<256xi32>) -> tensor<256xi32> | |
%5 = linalg.generic {indexing_maps = [#map2, #map1], iterator_types = ["parallel", "reduction"]} ins(%2 : tensor<256x256xi8>) outs(%4 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%6 = arith.extsi %in : i8 to i32 | |
%7 = arith.addi %6, %out : i32 | |
linalg.yield %7 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %5, %1, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
return | |
} | |
} | |
} | |
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_6 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg0: i32, %arg1: i32, %arg2: !stream.binding, %arg3: !stream.binding, %arg4: !stream.binding, %arg5: !stream.binding) { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x16x16x16xi32>> | |
%1 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256xi32>> | |
%2 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256xi32>> | |
%3 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<256x256xi32>> | |
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%5 = flow.dispatch.tensor.load %1, offsets = [0], sizes = [256], strides = [1] : !flow.dispatch.tensor<readonly:tensor<256xi32>> -> tensor<256xi32> | |
%6 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [256], strides = [1] : !flow.dispatch.tensor<readonly:tensor<256xi32>> -> tensor<256xi32> | |
%7 = arith.muli %arg0, %arg1 : i32 | |
%8 = tensor.empty() : tensor<256x256xi32> | |
%9 = arith.muli %7, %c256_i32 : i32 | |
%unpack = tensor.unpack %4 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %8 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%10 = linalg.generic {indexing_maps = [#map, #map1, #map3, #map4, #map3, #map3, #map], iterator_types = ["parallel", "parallel"]} ins(%unpack, %5, %arg1, %6, %arg0, %9 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%8 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_0: i32, %in_1: i32, %in_2: i32, %in_3: i32, %in_4: i32, %out: i32): | |
%11 = arith.muli %in_0, %in_1 : i32 | |
%12 = arith.subi %in, %11 : i32 | |
%13 = arith.muli %in_2, %in_3 : i32 | |
%14 = arith.subi %12, %13 : i32 | |
%15 = arith.addi %14, %in_4 : i32 | |
linalg.yield %15 : i32 | |
} -> tensor<256x256xi32> | |
flow.dispatch.tensor.store %10, %3, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : tensor<256x256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256x256xi32>> | |
return | |
} | |
} | |
} | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%element_type_i8 = hal.element_type<i8> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c256, %c256]) type(%element_type_i8) encoding(%dense_row_major) | |
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<256x256xi8> : index | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<256x256xi8> in !stream.resource<external>{%0} | |
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c256, %c256]) type(%element_type_i8) encoding(%dense_row_major) | |
%3 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<256x256xi8> in !stream.resource<external>{%0} | |
%4 = stream.async.transfer %3 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0} | |
%element_type_i32 = hal.element_type<i32> : i32 | |
hal.buffer_view.assert<%arg4 : !hal.buffer_view> message("input4") shape([%c256, %c256]) type(%element_type_i32) encoding(%dense_row_major) | |
%5 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<256x256xi32> : index | |
%6 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg4 : !hal.buffer_view -> tensor<256x256xi32> in !stream.resource<external>{%5} | |
%7 = stream.async.transfer %6 : !stream.resource<external>{%5} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%5} | |
%8 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<16x128x16x2xi8> : index | |
%9 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_0::@quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%2[%c0 to %0 for %0]) : (!stream.resource<*>{%0}) -> !stream.resource<*>{%8} | |
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_1::@quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%4[%c0 to %0 for %0]) : (!stream.resource<*>{%0}) -> !stream.resource<*>{%8} | |
%11 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<16x16x16x16xi32> : index | |
%12 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_2::@quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%7[%c0 to %5 for %5]) : (!stream.resource<*>{%5}) -> !stream.resource<*>{%11} | |
%13 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_3::@quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%9[%c0 to %8 for %8], %10[%c0 to %8 for %8], %12[%c0 to %11 for %11]) : (!stream.resource<*>{%8}, !stream.resource<*>{%8}, !stream.resource<*>{%11}) -> %12{%11} | |
%14 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<256xi32> : index | |
%15 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_4::@quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%2[%c0 to %0 for %0]) : (!stream.resource<*>{%0}) -> !stream.resource<*>{%14} | |
%16 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_5::@quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%4[%c0 to %0 for %0]) : (!stream.resource<*>{%0}) -> !stream.resource<*>{%14} | |
%17 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_6::@quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg2, %arg3, %13[%c0 to %11 for %11], %15[%c0 to %14 for %14], %16[%c0 to %14 for %14]) : (i32, i32, !stream.resource<*>{%11}, !stream.resource<*>{%14}, !stream.resource<*>{%14}) -> !stream.resource<*>{%5} | |
%18 = stream.async.transfer %17 : !stream.resource<*>{%5} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%5} | |
%19 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %18 : tensor<256x256xi32> in !stream.resource<external>{%5} -> !hal.buffer_view | |
util.return %19 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After IPO (iree-util-ipo) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-amx-fp8,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,-movrs,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,+evex512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-sha512,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,+gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,-nf,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-amx-transpose,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1) -> (d0, d1)> | |
#map1 = affine_map<(d0, d1) -> (d0)> | |
#map2 = affine_map<(d0, d1) -> (d1, d0)> | |
#map3 = affine_map<(d0, d1) -> ()> | |
#map4 = affine_map<(d0, d1) -> (d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_0 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%arg0: !stream.binding, %arg1: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256x256xi8>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
%2 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
return | |
} | |
} | |
} | |
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_1 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%arg0: !stream.binding, %arg1: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256x256xi8>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
%2 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %2 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
return | |
} | |
} | |
} | |
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_2 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%arg0: !stream.binding, %arg1: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256x256xi32>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>> | |
%2 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi32>> -> tensor<256x256xi32> | |
%3 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %3 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %pack, %1, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>> | |
return | |
} | |
} | |
} | |
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_3 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%5 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%6 = linalg.mmt4d ins(%3, %4 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%5 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %6, %2, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> | |
return | |
} | |
} | |
} | |
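// dispatch_3 is the compute kernel: linalg.mmt4d contracts the two packed i8 operands over the
// K dimensions (128 outer x 2 inner) and accumulates in place into the packed i32 tensor produced
// by dispatch_2 (note the readwrite binding). As I read the op's indexing, a numpy equivalent is
// (illustrative only; operand names are mine):
//
//   import numpy as np
//   lhs_pack = np.random.randint(-128, 128, (16, 128, 16, 2), dtype=np.int8)
//   rhs_pack = np.random.randint(-128, 128, (16, 128, 16, 2), dtype=np.int8)
//   acc_pack = np.zeros((16, 16, 16, 16), dtype=np.int32)
//   # acc[m1, n1, m0, n0] += sum over k1, k0 of lhs[m1, k1, m0, k0] * rhs[n1, k1, n0, k0]
//   acc_pack += np.einsum('mkac,nkbc->mnab', lhs_pack.astype(np.int32), rhs_pack.astype(np.int32))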
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_4 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%arg0: !stream.binding, %arg1: !stream.binding) { | |
%c0_i32 = arith.constant 0 : i32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256x256xi8>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
%2 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%3 = tensor.empty() : tensor<256xi32> | |
%4 = linalg.fill ins(%c0_i32 : i32) outs(%3 : tensor<256xi32>) -> tensor<256xi32> | |
%5 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "reduction"]} ins(%2 : tensor<256x256xi8>) outs(%4 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%6 = arith.extsi %in : i8 to i32 | |
%7 = arith.addi %6, %out : i32 | |
linalg.yield %7 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %5, %1, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
return | |
} | |
} | |
} | |
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_5 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%arg0: !stream.binding, %arg1: !stream.binding) { | |
%c0_i32 = arith.constant 0 : i32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256x256xi8>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
%2 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%3 = tensor.empty() : tensor<256xi32> | |
%4 = linalg.fill ins(%c0_i32 : i32) outs(%3 : tensor<256xi32>) -> tensor<256xi32> | |
%5 = linalg.generic {indexing_maps = [#map2, #map1], iterator_types = ["parallel", "reduction"]} ins(%2 : tensor<256x256xi8>) outs(%4 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%6 = arith.extsi %in : i8 to i32 | |
%7 = arith.addi %6, %out : i32 | |
linalg.yield %7 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %5, %1, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
return | |
} | |
} | |
} | |
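// dispatch_4 and dispatch_5 compute the operand sums needed for the zero-point correction:
// dispatch_4 reduces the LHS over its second dimension (input map (d0, d1) -> (d0, d1), i.e. row
// sums), while dispatch_5 uses the transposed input map (d0, d1) -> (d1, d0), giving column sums
// of the RHS. In numpy terms (illustrative only):
//
//   import numpy as np
//   lhs = np.random.randint(-128, 128, (256, 256), dtype=np.int8)
//   rhs = np.random.randint(-128, 128, (256, 256), dtype=np.int8)
//   row_sum_lhs = lhs.astype(np.int32).sum(axis=1)   # dispatch_4 -> tensor<256xi32>
//   col_sum_rhs = rhs.astype(np.int32).sum(axis=0)   # dispatch_5 -> tensor<256xi32>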
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_6 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg0: i32, %arg1: i32, %arg2: !stream.binding, %arg3: !stream.binding, %arg4: !stream.binding, %arg5: !stream.binding) { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x16x16x16xi32>> | |
%1 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256xi32>> | |
%2 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256xi32>> | |
%3 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<256x256xi32>> | |
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%5 = flow.dispatch.tensor.load %1, offsets = [0], sizes = [256], strides = [1] : !flow.dispatch.tensor<readonly:tensor<256xi32>> -> tensor<256xi32> | |
%6 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [256], strides = [1] : !flow.dispatch.tensor<readonly:tensor<256xi32>> -> tensor<256xi32> | |
%7 = arith.muli %arg0, %arg1 : i32 | |
%8 = tensor.empty() : tensor<256x256xi32> | |
%9 = arith.muli %7, %c256_i32 : i32 | |
%unpack = tensor.unpack %4 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %8 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%10 = linalg.generic {indexing_maps = [#map, #map1, #map3, #map4, #map3, #map3, #map], iterator_types = ["parallel", "parallel"]} ins(%unpack, %5, %arg1, %6, %arg0, %9 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%8 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_0: i32, %in_1: i32, %in_2: i32, %in_3: i32, %in_4: i32, %out: i32): | |
%11 = arith.muli %in_0, %in_1 : i32 | |
%12 = arith.subi %in, %11 : i32 | |
%13 = arith.muli %in_2, %in_3 : i32 | |
%14 = arith.subi %12, %13 : i32 | |
%15 = arith.addi %14, %in_4 : i32 | |
linalg.yield %15 : i32 | |
} -> tensor<256x256xi32> | |
flow.dispatch.tensor.store %10, %3, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : tensor<256x256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256x256xi32>> | |
return | |
} | |
} | |
} | |
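// dispatch_6 unpacks the 16x16x16x16 accumulator back to 256x256 and applies the zero-point
// correction. Per element, the scalar body computes
//   out[m, n] = (C + A*B)[m, n] - zp_rhs * row_sum_lhs[m] - zp_lhs * col_sum_rhs[n] + 256 * zp_lhs * zp_rhs
// which is the expansion of sum_k (A[m, k] - zp_lhs) * (B[k, n] - zp_rhs) accumulated onto the
// init tensor, matching the linalg.quantized_matmul in the original function. A self-contained
// numpy check of the whole seven-dispatch decomposition follows the end of this module.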
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%element_type_i8 = hal.element_type<i8> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c256, %c256]) type(%element_type_i8) encoding(%dense_row_major) | |
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<256x256xi8> : index | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<256x256xi8> in !stream.resource<external>{%0} | |
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c256, %c256]) type(%element_type_i8) encoding(%dense_row_major) | |
%3 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<256x256xi8> in !stream.resource<external>{%0} | |
%4 = stream.async.transfer %3 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0} | |
%element_type_i32 = hal.element_type<i32> : i32 | |
hal.buffer_view.assert<%arg4 : !hal.buffer_view> message("input4") shape([%c256, %c256]) type(%element_type_i32) encoding(%dense_row_major) | |
%5 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<256x256xi32> : index | |
%6 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg4 : !hal.buffer_view -> tensor<256x256xi32> in !stream.resource<external>{%5} | |
%7 = stream.async.transfer %6 : !stream.resource<external>{%5} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%5} | |
%8 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<16x128x16x2xi8> : index | |
%9 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_0::@quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%2[%c0 to %0 for %0]) : (!stream.resource<*>{%0}) -> !stream.resource<*>{%8} | |
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_1::@quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%4[%c0 to %0 for %0]) : (!stream.resource<*>{%0}) -> !stream.resource<*>{%8} | |
%11 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<16x16x16x16xi32> : index | |
%12 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_2::@quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%7[%c0 to %5 for %5]) : (!stream.resource<*>{%5}) -> !stream.resource<*>{%11} | |
%13 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_3::@quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%9[%c0 to %8 for %8], %10[%c0 to %8 for %8], %12[%c0 to %11 for %11]) : (!stream.resource<*>{%8}, !stream.resource<*>{%8}, !stream.resource<*>{%11}) -> %12{%11} | |
%14 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<256xi32> : index | |
%15 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_4::@quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%2[%c0 to %0 for %0]) : (!stream.resource<*>{%0}) -> !stream.resource<*>{%14} | |
%16 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_5::@quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%4[%c0 to %0 for %0]) : (!stream.resource<*>{%0}) -> !stream.resource<*>{%14} | |
%17 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_6::@quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg2, %arg3, %13[%c0 to %11 for %11], %15[%c0 to %14 for %14], %16[%c0 to %14 for %14]) : (i32, i32, !stream.resource<*>{%11}, !stream.resource<*>{%14}, !stream.resource<*>{%14}) -> !stream.resource<*>{%5} | |
%18 = stream.async.transfer %17 : !stream.resource<*>{%5} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%5} | |
%19 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %18 : tensor<256x256xi32> in !stream.resource<external>{%5} -> !hal.buffer_view | |
util.return %19 : !hal.buffer_view | |
} | |
} | |
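// End-to-end, the public function above imports the three buffer views, runs the seven dispatches
// (pack LHS / pack RHS / pack accumulator / mmt4d / row sums / column sums / unpack + correction),
// and exports the i32 result. A self-contained numpy sketch of that decomposition, checked against
// the direct quantized matmul (illustrative only; the names and random test data are mine, not
// compiler output):
//
//   import numpy as np
//   zp_lhs, zp_rhs = 3, -5                                    # stand-ins for input2, input3
//   A = np.random.randint(-128, 128, (256, 256), dtype=np.int8)
//   B = np.random.randint(-128, 128, (256, 256), dtype=np.int8)
//   C = np.random.randint(-1000, 1000, (256, 256), dtype=np.int32)
//   ref = C + (A.astype(np.int32) - zp_lhs) @ (B.astype(np.int32) - zp_rhs)
//   # dispatch_0..2: pack the operands and the accumulator
//   a_pack = A.reshape(16, 16, 128, 2).transpose(0, 2, 1, 3)
//   b_pack = B.reshape(128, 2, 16, 16).transpose(2, 0, 3, 1)
//   c_pack = C.reshape(16, 16, 16, 16).transpose(0, 2, 1, 3).copy()
//   # dispatch_3: mmt4d, accumulating into the packed accumulator
//   c_pack += np.einsum('mkac,nkbc->mnab', a_pack.astype(np.int32), b_pack.astype(np.int32))
//   # dispatch_4/5: operand sums
//   row_sum_a = A.astype(np.int32).sum(axis=1)
//   col_sum_b = B.astype(np.int32).sum(axis=0)
//   # dispatch_6: unpack and apply the zero-point correction
//   acc = c_pack.transpose(0, 2, 1, 3).reshape(256, 256)
//   out = acc - zp_rhs * row_sum_a[:, None] - zp_lhs * col_sum_b[None, :] + 256 * zp_lhs * zp_rhs
//   assert np.array_equal(out, ref)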
// -----// IR Dump After CombineInitializers (iree-util-combine-initializers) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-amx-fp8,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,-movrs,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,+evex512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-sha512,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,+gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,-nf,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-amx-transpose,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1) -> (d0, d1)> | |
#map1 = affine_map<(d0, d1) -> (d0)> | |
#map2 = affine_map<(d0, d1) -> (d1, d0)> | |
#map3 = affine_map<(d0, d1) -> ()> | |
#map4 = affine_map<(d0, d1) -> (d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_0 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%arg0: !stream.binding, %arg1: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256x256xi8>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
%2 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
return | |
} | |
} | |
} | |
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_1 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%arg0: !stream.binding, %arg1: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256x256xi8>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
%2 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %2 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
return | |
} | |
} | |
} | |
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_2 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%arg0: !stream.binding, %arg1: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256x256xi32>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>> | |
%2 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi32>> -> tensor<256x256xi32> | |
%3 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %3 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %pack, %1, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>> | |
return | |
} | |
} | |
} | |
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_3 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%5 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%6 = linalg.mmt4d ins(%3, %4 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%5 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %6, %2, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> | |
return | |
} | |
} | |
} | |
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_4 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%arg0: !stream.binding, %arg1: !stream.binding) { | |
%c0_i32 = arith.constant 0 : i32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256x256xi8>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
%2 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%3 = tensor.empty() : tensor<256xi32> | |
%4 = linalg.fill ins(%c0_i32 : i32) outs(%3 : tensor<256xi32>) -> tensor<256xi32> | |
%5 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "reduction"]} ins(%2 : tensor<256x256xi8>) outs(%4 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%6 = arith.extsi %in : i8 to i32 | |
%7 = arith.addi %6, %out : i32 | |
linalg.yield %7 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %5, %1, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
return | |
} | |
} | |
} | |
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_5 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%arg0: !stream.binding, %arg1: !stream.binding) { | |
%c0_i32 = arith.constant 0 : i32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256x256xi8>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
%2 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%3 = tensor.empty() : tensor<256xi32> | |
%4 = linalg.fill ins(%c0_i32 : i32) outs(%3 : tensor<256xi32>) -> tensor<256xi32> | |
%5 = linalg.generic {indexing_maps = [#map2, #map1], iterator_types = ["parallel", "reduction"]} ins(%2 : tensor<256x256xi8>) outs(%4 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%6 = arith.extsi %in : i8 to i32 | |
%7 = arith.addi %6, %out : i32 | |
linalg.yield %7 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %5, %1, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
return | |
} | |
} | |
} | |
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_6 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg0: i32, %arg1: i32, %arg2: !stream.binding, %arg3: !stream.binding, %arg4: !stream.binding, %arg5: !stream.binding) { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x16x16x16xi32>> | |
%1 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256xi32>> | |
%2 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256xi32>> | |
%3 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<256x256xi32>> | |
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%5 = flow.dispatch.tensor.load %1, offsets = [0], sizes = [256], strides = [1] : !flow.dispatch.tensor<readonly:tensor<256xi32>> -> tensor<256xi32> | |
%6 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [256], strides = [1] : !flow.dispatch.tensor<readonly:tensor<256xi32>> -> tensor<256xi32> | |
%7 = arith.muli %arg0, %arg1 : i32 | |
%8 = tensor.empty() : tensor<256x256xi32> | |
%9 = arith.muli %7, %c256_i32 : i32 | |
%unpack = tensor.unpack %4 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %8 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%10 = linalg.generic {indexing_maps = [#map, #map1, #map3, #map4, #map3, #map3, #map], iterator_types = ["parallel", "parallel"]} ins(%unpack, %5, %arg1, %6, %arg0, %9 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%8 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_0: i32, %in_1: i32, %in_2: i32, %in_3: i32, %in_4: i32, %out: i32): | |
%11 = arith.muli %in_0, %in_1 : i32 | |
%12 = arith.subi %in, %11 : i32 | |
%13 = arith.muli %in_2, %in_3 : i32 | |
%14 = arith.subi %12, %13 : i32 | |
%15 = arith.addi %14, %in_4 : i32 | |
linalg.yield %15 : i32 | |
} -> tensor<256x256xi32> | |
flow.dispatch.tensor.store %10, %3, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : tensor<256x256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256x256xi32>> | |
return | |
} | |
} | |
} | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%element_type_i8 = hal.element_type<i8> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c256, %c256]) type(%element_type_i8) encoding(%dense_row_major) | |
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<256x256xi8> : index | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<256x256xi8> in !stream.resource<external>{%0} | |
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c256, %c256]) type(%element_type_i8) encoding(%dense_row_major) | |
%3 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<256x256xi8> in !stream.resource<external>{%0} | |
%4 = stream.async.transfer %3 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0} | |
%element_type_i32 = hal.element_type<i32> : i32 | |
hal.buffer_view.assert<%arg4 : !hal.buffer_view> message("input4") shape([%c256, %c256]) type(%element_type_i32) encoding(%dense_row_major) | |
%5 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<256x256xi32> : index | |
%6 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg4 : !hal.buffer_view -> tensor<256x256xi32> in !stream.resource<external>{%5} | |
%7 = stream.async.transfer %6 : !stream.resource<external>{%5} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%5} | |
%8 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<16x128x16x2xi8> : index | |
%9 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_0::@quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%2[%c0 to %0 for %0]) : (!stream.resource<*>{%0}) -> !stream.resource<*>{%8} | |
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_1::@quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%4[%c0 to %0 for %0]) : (!stream.resource<*>{%0}) -> !stream.resource<*>{%8} | |
%11 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<16x16x16x16xi32> : index | |
%12 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_2::@quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%7[%c0 to %5 for %5]) : (!stream.resource<*>{%5}) -> !stream.resource<*>{%11} | |
%13 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_3::@quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%9[%c0 to %8 for %8], %10[%c0 to %8 for %8], %12[%c0 to %11 for %11]) : (!stream.resource<*>{%8}, !stream.resource<*>{%8}, !stream.resource<*>{%11}) -> %12{%11} | |
%14 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<256xi32> : index | |
%15 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_4::@quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%2[%c0 to %0 for %0]) : (!stream.resource<*>{%0}) -> !stream.resource<*>{%14} | |
%16 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_5::@quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%4[%c0 to %0 for %0]) : (!stream.resource<*>{%0}) -> !stream.resource<*>{%14} | |
%17 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_6::@quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg2, %arg3, %13[%c0 to %11 for %11], %15[%c0 to %14 for %14], %16[%c0 to %14 for %14]) : (i32, i32, !stream.resource<*>{%11}, !stream.resource<*>{%14}, !stream.resource<*>{%14}) -> !stream.resource<*>{%5} | |
%18 = stream.async.transfer %17 : !stream.resource<*>{%5} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%5} | |
%19 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %18 : tensor<256x256xi32> in !stream.resource<external>{%5} -> !hal.buffer_view | |
util.return %19 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After EncodeDeviceTensorsPass (iree-stream-encode-device-tensors) //----- // | |
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_0 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%arg0: !stream.binding, %arg1: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256x256xi8>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
%2 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
return | |
} | |
} | |
} | |
// -----// IR Dump After EncodeDeviceTensorsPass (iree-stream-encode-device-tensors) //----- // | |
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_2 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%arg0: !stream.binding, %arg1: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256x256xi32>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>> | |
%2 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi32>> -> tensor<256x256xi32> | |
%3 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %3 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %pack, %1, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>> | |
return | |
} | |
} | |
} | |
// -----// IR Dump After EncodeDeviceTensorsPass (iree-stream-encode-device-tensors) //----- // | |
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_5 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%arg0: !stream.binding, %arg1: !stream.binding) { | |
%c0_i32 = arith.constant 0 : i32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256x256xi8>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
%2 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%3 = tensor.empty() : tensor<256xi32> | |
%4 = linalg.fill ins(%c0_i32 : i32) outs(%3 : tensor<256xi32>) -> tensor<256xi32> | |
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%2 : tensor<256x256xi8>) outs(%4 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%6 = arith.extsi %in : i8 to i32 | |
%7 = arith.addi %6, %out : i32 | |
linalg.yield %7 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %5, %1, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
return | |
} | |
} | |
} | |
// -----// IR Dump After EncodeDeviceTensorsPass (iree-stream-encode-device-tensors) //----- // | |
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_6 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg0: i32, %arg1: i32, %arg2: !stream.binding, %arg3: !stream.binding, %arg4: !stream.binding, %arg5: !stream.binding) { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x16x16x16xi32>> | |
%1 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256xi32>> | |
%2 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256xi32>> | |
%3 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<256x256xi32>> | |
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%5 = flow.dispatch.tensor.load %1, offsets = [0], sizes = [256], strides = [1] : !flow.dispatch.tensor<readonly:tensor<256xi32>> -> tensor<256xi32> | |
%6 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [256], strides = [1] : !flow.dispatch.tensor<readonly:tensor<256xi32>> -> tensor<256xi32> | |
%7 = arith.muli %arg0, %arg1 : i32 | |
%8 = tensor.empty() : tensor<256x256xi32> | |
%9 = arith.muli %7, %c256_i32 : i32 | |
%unpack = tensor.unpack %4 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %8 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> ()>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%unpack, %5, %arg1, %6, %arg0, %9 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%8 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_0: i32, %in_1: i32, %in_2: i32, %in_3: i32, %in_4: i32, %out: i32): | |
%11 = arith.muli %in_0, %in_1 : i32 | |
%12 = arith.subi %in, %11 : i32 | |
%13 = arith.muli %in_2, %in_3 : i32 | |
%14 = arith.subi %12, %13 : i32 | |
%15 = arith.addi %14, %in_4 : i32 | |
linalg.yield %15 : i32 | |
} -> tensor<256x256xi32> | |
flow.dispatch.tensor.store %10, %3, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : tensor<256x256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256x256xi32>> | |
return | |
} | |
} | |
} | |
// -----// IR Dump After EncodeDeviceTensorsPass (iree-stream-encode-device-tensors) //----- // | |
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_4 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%arg0: !stream.binding, %arg1: !stream.binding) { | |
%c0_i32 = arith.constant 0 : i32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256x256xi8>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
%2 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%3 = tensor.empty() : tensor<256xi32> | |
%4 = linalg.fill ins(%c0_i32 : i32) outs(%3 : tensor<256xi32>) -> tensor<256xi32> | |
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0)>], iterator_types = ["parallel", "reduction"]} ins(%2 : tensor<256x256xi8>) outs(%4 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%6 = arith.extsi %in : i8 to i32 | |
%7 = arith.addi %6, %out : i32 | |
linalg.yield %7 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %5, %1, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
return | |
} | |
} | |
} | |
// -----// IR Dump After EncodeDeviceTensorsPass (iree-stream-encode-device-tensors) //----- // | |
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_1 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%arg0: !stream.binding, %arg1: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256x256xi8>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
%2 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %2 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
return | |
} | |
} | |
} | |
// -----// IR Dump After EncodeDeviceTensorsPass (iree-stream-encode-device-tensors) //----- // | |
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_3 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%5 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%6 = linalg.mmt4d ins(%3, %4 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%5 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %6, %2, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> | |
return | |
} | |
} | |
} | |
// -----// IR Dump After EncodeHostTensorsPass (iree-stream-encode-host-tensors) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c1024 = arith.constant 1024 : index | |
%c262144 = arith.constant 262144 : index | |
%c65536 = arith.constant 65536 : index | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%element_type_i8 = hal.element_type<i8> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c256, %c256]) type(%element_type_i8) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<256x256xi8> in !stream.resource<external>{%c65536} | |
%1 = stream.async.transfer %0 : !stream.resource<external>{%c65536} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c65536} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c256, %c256]) type(%element_type_i8) encoding(%dense_row_major) | |
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<256x256xi8> in !stream.resource<external>{%c65536} | |
%3 = stream.async.transfer %2 : !stream.resource<external>{%c65536} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c65536} | |
%element_type_i32 = hal.element_type<i32> : i32 | |
hal.buffer_view.assert<%arg4 : !hal.buffer_view> message("input4") shape([%c256, %c256]) type(%element_type_i32) encoding(%dense_row_major) | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg4 : !hal.buffer_view -> tensor<256x256xi32> in !stream.resource<external>{%c262144} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%c262144} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c262144} | |
%6 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_0::@quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%1[%c0 to %c65536 for %c65536]) : (!stream.resource<*>{%c65536}) -> !stream.resource<*>{%c65536} | |
%7 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_1::@quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%3[%c0 to %c65536 for %c65536]) : (!stream.resource<*>{%c65536}) -> !stream.resource<*>{%c65536} | |
%8 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_2::@quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%5[%c0 to %c262144 for %c262144]) : (!stream.resource<*>{%c262144}) -> !stream.resource<*>{%c262144} | |
%9 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_3::@quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%6[%c0 to %c65536 for %c65536], %7[%c0 to %c65536 for %c65536], %8[%c0 to %c262144 for %c262144]) : (!stream.resource<*>{%c65536}, !stream.resource<*>{%c65536}, !stream.resource<*>{%c262144}) -> %8{%c262144} | |
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_4::@quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%1[%c0 to %c65536 for %c65536]) : (!stream.resource<*>{%c65536}) -> !stream.resource<*>{%c1024} | |
%11 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_5::@quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%3[%c0 to %c65536 for %c65536]) : (!stream.resource<*>{%c65536}) -> !stream.resource<*>{%c1024} | |
%12 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_6::@quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg2, %arg3, %9[%c0 to %c262144 for %c262144], %10[%c0 to %c1024 for %c1024], %11[%c0 to %c1024 for %c1024]) : (i32, i32, !stream.resource<*>{%c262144}, !stream.resource<*>{%c1024}, !stream.resource<*>{%c1024}) -> !stream.resource<*>{%c262144} | |
%13 = stream.async.transfer %12 : !stream.resource<*>{%c262144} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c262144} | |
%14 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %13 : tensor<256x256xi32> in !stream.resource<external>{%c262144} -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
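// After iree-stream-encode-host-tensors the symbolic stream.tensor.sizeof values from the previous
// dump are folded into concrete byte counts, which is why the resource sizes now appear as plain
// index constants. The arithmetic behind those constants (illustrative check):
//
//   assert 256 * 256 * 1 == 65536            # tensor<256x256xi8>      -> %c65536
//   assert 256 * 256 * 4 == 262144           # tensor<256x256xi32>     -> %c262144
//   assert 256 * 4 == 1024                   # tensor<256xi32>         -> %c1024
//   assert 16 * 128 * 16 * 2 * 1 == 65536    # tensor<16x128x16x2xi8>
//   assert 16 * 16 * 16 * 16 * 4 == 262144   # tensor<16x16x16x16xi32>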
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c1024 = arith.constant 1024 : index | |
%c262144 = arith.constant 262144 : index | |
%c65536 = arith.constant 65536 : index | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%element_type_i8 = hal.element_type<i8> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c256, %c256]) type(%element_type_i8) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<256x256xi8> in !stream.resource<external>{%c65536} | |
%1 = stream.async.transfer %0 : !stream.resource<external>{%c65536} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c65536} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c256, %c256]) type(%element_type_i8) encoding(%dense_row_major) | |
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<256x256xi8> in !stream.resource<external>{%c65536} | |
%3 = stream.async.transfer %2 : !stream.resource<external>{%c65536} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c65536} | |
%element_type_i32 = hal.element_type<i32> : i32 | |
hal.buffer_view.assert<%arg4 : !hal.buffer_view> message("input4") shape([%c256, %c256]) type(%element_type_i32) encoding(%dense_row_major) | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg4 : !hal.buffer_view -> tensor<256x256xi32> in !stream.resource<external>{%c262144} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%c262144} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c262144} | |
%6 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_0::@quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%1[%c0 to %c65536 for %c65536]) : (!stream.resource<*>{%c65536}) -> !stream.resource<*>{%c65536} | |
%7 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_1::@quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%3[%c0 to %c65536 for %c65536]) : (!stream.resource<*>{%c65536}) -> !stream.resource<*>{%c65536} | |
%8 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_2::@quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%5[%c0 to %c262144 for %c262144]) : (!stream.resource<*>{%c262144}) -> !stream.resource<*>{%c262144} | |
%9 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_3::@quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%6[%c0 to %c65536 for %c65536], %7[%c0 to %c65536 for %c65536], %8[%c0 to %c262144 for %c262144]) : (!stream.resource<*>{%c65536}, !stream.resource<*>{%c65536}, !stream.resource<*>{%c262144}) -> %8{%c262144} | |
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_4::@quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%1[%c0 to %c65536 for %c65536]) : (!stream.resource<*>{%c65536}) -> !stream.resource<*>{%c1024} | |
%11 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_5::@quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%3[%c0 to %c65536 for %c65536]) : (!stream.resource<*>{%c65536}) -> !stream.resource<*>{%c1024} | |
%12 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_6::@quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg2, %arg3, %9[%c0 to %c262144 for %c262144], %10[%c0 to %c1024 for %c1024], %11[%c0 to %c1024 for %c1024]) : (i32, i32, !stream.resource<*>{%c262144}, !stream.resource<*>{%c1024}, !stream.resource<*>{%c1024}) -> !stream.resource<*>{%c262144} | |
%13 = stream.async.transfer %12 : !stream.resource<*>{%c262144} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c262144} | |
%14 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %13 : tensor<256x256xi32> in !stream.resource<external>{%c262144} -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c1024 = arith.constant 1024 : index | |
%c262144 = arith.constant 262144 : index | |
%c65536 = arith.constant 65536 : index | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%element_type_i8 = hal.element_type<i8> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c256, %c256]) type(%element_type_i8) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<256x256xi8> in !stream.resource<external>{%c65536} | |
%1 = stream.async.transfer %0 : !stream.resource<external>{%c65536} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c65536} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c256, %c256]) type(%element_type_i8) encoding(%dense_row_major) | |
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<256x256xi8> in !stream.resource<external>{%c65536} | |
%3 = stream.async.transfer %2 : !stream.resource<external>{%c65536} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c65536} | |
%element_type_i32 = hal.element_type<i32> : i32 | |
hal.buffer_view.assert<%arg4 : !hal.buffer_view> message("input4") shape([%c256, %c256]) type(%element_type_i32) encoding(%dense_row_major) | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg4 : !hal.buffer_view -> tensor<256x256xi32> in !stream.resource<external>{%c262144} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%c262144} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c262144} | |
%6 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_0::@quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%1[%c0 to %c65536 for %c65536]) : (!stream.resource<*>{%c65536}) -> !stream.resource<*>{%c65536} | |
%7 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_1::@quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%3[%c0 to %c65536 for %c65536]) : (!stream.resource<*>{%c65536}) -> !stream.resource<*>{%c65536} | |
%8 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_2::@quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%5[%c0 to %c262144 for %c262144]) : (!stream.resource<*>{%c262144}) -> !stream.resource<*>{%c262144} | |
%9 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_3::@quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%6[%c0 to %c65536 for %c65536], %7[%c0 to %c65536 for %c65536], %8[%c0 to %c262144 for %c262144]) : (!stream.resource<*>{%c65536}, !stream.resource<*>{%c65536}, !stream.resource<*>{%c262144}) -> %8{%c262144} | |
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_4::@quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%1[%c0 to %c65536 for %c65536]) : (!stream.resource<*>{%c65536}) -> !stream.resource<*>{%c1024} | |
%11 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_5::@quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%3[%c0 to %c65536 for %c65536]) : (!stream.resource<*>{%c65536}) -> !stream.resource<*>{%c1024} | |
%12 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_6::@quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg2, %arg3, %9[%c0 to %c262144 for %c262144], %10[%c0 to %c1024 for %c1024], %11[%c0 to %c1024 for %c1024]) : (i32, i32, !stream.resource<*>{%c262144}, !stream.resource<*>{%c1024}, !stream.resource<*>{%c1024}) -> !stream.resource<*>{%c262144} | |
%13 = stream.async.transfer %12 : !stream.resource<*>{%c262144} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c262144} | |
%14 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %13 : tensor<256x256xi32> in !stream.resource<external>{%c262144} -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
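At this point the original linalg.quantized_matmul has been split into seven dispatches: two i8 packs for the operands (dispatch_0 / dispatch_1), an i32 pack for the accumulator (dispatch_2), the tiled linalg.mmt4d product (dispatch_3), LHS row sums and RHS column sums (dispatch_4 / dispatch_5), and an unpack plus elementwise zero-point correction (dispatch_6). A minimal NumPy sketch of the same arithmetic, ignoring the data-tiling; the function names are illustrative only and not part of IREE:

    import numpy as np

    def quantized_matmul_reference(lhs_i8, rhs_i8, lhs_zp, rhs_zp, acc_i32):
        # Direct definition of linalg.quantized_matmul: accumulate
        # (lhs - lhs_zp) * (rhs - rhs_zp) into the i32 accumulator.
        lhs = lhs_i8.astype(np.int32) - lhs_zp
        rhs = rhs_i8.astype(np.int32) - rhs_zp
        return acc_i32 + lhs @ rhs

    def quantized_matmul_decomposed(lhs_i8, rhs_i8, lhs_zp, rhs_zp, acc_i32):
        # What the dispatch graph above computes: a plain i8*i8 -> i32 matmul
        # (dispatch_3, on data-tiled operands) followed by a zero-point
        # correction built from LHS row sums (dispatch_4) and RHS column sums
        # (dispatch_5) in dispatch_6.
        lhs = lhs_i8.astype(np.int32)
        rhs = rhs_i8.astype(np.int32)
        k = lhs.shape[1]
        acc = acc_i32 + lhs @ rhs                    # dispatch_3
        row_sums = lhs.sum(axis=1, keepdims=True)    # dispatch_4, shape (M, 1)
        col_sums = rhs.sum(axis=0, keepdims=True)    # dispatch_5, shape (1, N)
        return acc - row_sums * rhs_zp - col_sums * lhs_zp + k * lhs_zp * rhs_zp

Both functions return the same result for any i8 inputs; the second mirrors the structure of the stream program above.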
// -----// IR Dump After OptimizeIntArithmetic (iree-util-optimize-int-arithmetic) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c1024 = arith.constant 1024 : index | |
%c262144 = arith.constant 262144 : index | |
%c65536 = arith.constant 65536 : index | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%element_type_i8 = hal.element_type<i8> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c256, %c256]) type(%element_type_i8) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<256x256xi8> in !stream.resource<external>{%c65536} | |
%1 = stream.async.transfer %0 : !stream.resource<external>{%c65536} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c65536} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c256, %c256]) type(%element_type_i8) encoding(%dense_row_major) | |
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<256x256xi8> in !stream.resource<external>{%c65536} | |
%3 = stream.async.transfer %2 : !stream.resource<external>{%c65536} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c65536} | |
%element_type_i32 = hal.element_type<i32> : i32 | |
hal.buffer_view.assert<%arg4 : !hal.buffer_view> message("input4") shape([%c256, %c256]) type(%element_type_i32) encoding(%dense_row_major) | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg4 : !hal.buffer_view -> tensor<256x256xi32> in !stream.resource<external>{%c262144} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%c262144} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c262144} | |
%6 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_0::@quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%1[%c0 to %c65536 for %c65536]) : (!stream.resource<*>{%c65536}) -> !stream.resource<*>{%c65536} | |
%7 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_1::@quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%3[%c0 to %c65536 for %c65536]) : (!stream.resource<*>{%c65536}) -> !stream.resource<*>{%c65536} | |
%8 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_2::@quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%5[%c0 to %c262144 for %c262144]) : (!stream.resource<*>{%c262144}) -> !stream.resource<*>{%c262144} | |
%9 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_3::@quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%6[%c0 to %c65536 for %c65536], %7[%c0 to %c65536 for %c65536], %8[%c0 to %c262144 for %c262144]) : (!stream.resource<*>{%c65536}, !stream.resource<*>{%c65536}, !stream.resource<*>{%c262144}) -> %8{%c262144} | |
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_4::@quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%1[%c0 to %c65536 for %c65536]) : (!stream.resource<*>{%c65536}) -> !stream.resource<*>{%c1024} | |
%11 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_5::@quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%3[%c0 to %c65536 for %c65536]) : (!stream.resource<*>{%c65536}) -> !stream.resource<*>{%c1024} | |
%12 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_6::@quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg2, %arg3, %9[%c0 to %c262144 for %c262144], %10[%c0 to %c1024 for %c1024], %11[%c0 to %c1024 for %c1024]) : (i32, i32, !stream.resource<*>{%c262144}, !stream.resource<*>{%c1024}, !stream.resource<*>{%c1024}) -> !stream.resource<*>{%c262144} | |
%13 = stream.async.transfer %12 : !stream.resource<*>{%c262144} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c262144} | |
%14 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %13 : tensor<256x256xi32> in !stream.resource<external>{%c262144} -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c1024 = arith.constant 1024 : index | |
%c262144 = arith.constant 262144 : index | |
%c65536 = arith.constant 65536 : index | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%element_type_i8 = hal.element_type<i8> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c256, %c256]) type(%element_type_i8) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<256x256xi8> in !stream.resource<external>{%c65536} | |
%1 = stream.async.transfer %0 : !stream.resource<external>{%c65536} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c65536} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c256, %c256]) type(%element_type_i8) encoding(%dense_row_major) | |
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<256x256xi8> in !stream.resource<external>{%c65536} | |
%3 = stream.async.transfer %2 : !stream.resource<external>{%c65536} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c65536} | |
%element_type_i32 = hal.element_type<i32> : i32 | |
hal.buffer_view.assert<%arg4 : !hal.buffer_view> message("input4") shape([%c256, %c256]) type(%element_type_i32) encoding(%dense_row_major) | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg4 : !hal.buffer_view -> tensor<256x256xi32> in !stream.resource<external>{%c262144} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%c262144} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c262144} | |
%6 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_0::@quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%1[%c0 to %c65536 for %c65536]) : (!stream.resource<*>{%c65536}) -> !stream.resource<*>{%c65536} | |
%7 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_1::@quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%3[%c0 to %c65536 for %c65536]) : (!stream.resource<*>{%c65536}) -> !stream.resource<*>{%c65536} | |
%8 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_2::@quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%5[%c0 to %c262144 for %c262144]) : (!stream.resource<*>{%c262144}) -> !stream.resource<*>{%c262144} | |
%9 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_3::@quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%6[%c0 to %c65536 for %c65536], %7[%c0 to %c65536 for %c65536], %8[%c0 to %c262144 for %c262144]) : (!stream.resource<*>{%c65536}, !stream.resource<*>{%c65536}, !stream.resource<*>{%c262144}) -> %8{%c262144} | |
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_4::@quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%1[%c0 to %c65536 for %c65536]) : (!stream.resource<*>{%c65536}) -> !stream.resource<*>{%c1024} | |
%11 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_5::@quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%3[%c0 to %c65536 for %c65536]) : (!stream.resource<*>{%c65536}) -> !stream.resource<*>{%c1024} | |
%12 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_6::@quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg2, %arg3, %9[%c0 to %c262144 for %c262144], %10[%c0 to %c1024 for %c1024], %11[%c0 to %c1024 for %c1024]) : (i32, i32, !stream.resource<*>{%c262144}, !stream.resource<*>{%c1024}, !stream.resource<*>{%c1024}) -> !stream.resource<*>{%c262144} | |
%13 = stream.async.transfer %12 : !stream.resource<*>{%c262144} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c262144} | |
%14 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %13 : tensor<256x256xi32> in !stream.resource<external>{%c262144} -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- // | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c1024 = arith.constant 1024 : index | |
%c262144 = arith.constant 262144 : index | |
%c65536 = arith.constant 65536 : index | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%element_type_i8 = hal.element_type<i8> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c256, %c256]) type(%element_type_i8) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<256x256xi8> in !stream.resource<external>{%c65536} | |
%1 = stream.async.transfer %0 : !stream.resource<external>{%c65536} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c65536} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c256, %c256]) type(%element_type_i8) encoding(%dense_row_major) | |
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<256x256xi8> in !stream.resource<external>{%c65536} | |
%3 = stream.async.transfer %2 : !stream.resource<external>{%c65536} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c65536} | |
%element_type_i32 = hal.element_type<i32> : i32 | |
hal.buffer_view.assert<%arg4 : !hal.buffer_view> message("input4") shape([%c256, %c256]) type(%element_type_i32) encoding(%dense_row_major) | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg4 : !hal.buffer_view -> tensor<256x256xi32> in !stream.resource<external>{%c262144} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%c262144} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c262144} | |
%6 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_0::@quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%1[%c0 to %c65536 for %c65536]) : (!stream.resource<*>{%c65536}) -> !stream.resource<*>{%c65536} | |
%7 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_1::@quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%3[%c0 to %c65536 for %c65536]) : (!stream.resource<*>{%c65536}) -> !stream.resource<*>{%c65536} | |
%8 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_2::@quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%5[%c0 to %c262144 for %c262144]) : (!stream.resource<*>{%c262144}) -> !stream.resource<*>{%c262144} | |
%9 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_3::@quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%6[%c0 to %c65536 for %c65536], %7[%c0 to %c65536 for %c65536], %8[%c0 to %c262144 for %c262144]) : (!stream.resource<*>{%c65536}, !stream.resource<*>{%c65536}, !stream.resource<*>{%c262144}) -> %8{%c262144} | |
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_4::@quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%1[%c0 to %c65536 for %c65536]) : (!stream.resource<*>{%c65536}) -> !stream.resource<*>{%c1024} | |
%11 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_5::@quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%3[%c0 to %c65536 for %c65536]) : (!stream.resource<*>{%c65536}) -> !stream.resource<*>{%c1024} | |
%12 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_6::@quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg2, %arg3, %9[%c0 to %c262144 for %c262144], %10[%c0 to %c1024 for %c1024], %11[%c0 to %c1024 for %c1024]) : (i32, i32, !stream.resource<*>{%c262144}, !stream.resource<*>{%c1024}, !stream.resource<*>{%c1024}) -> !stream.resource<*>{%c262144} | |
%13 = stream.async.transfer %12 : !stream.resource<*>{%c262144} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c262144} | |
%14 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %13 : tensor<256x256xi32> in !stream.resource<external>{%c262144} -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-amx-fp8,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,-movrs,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,+evex512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-sha512,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,+gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,-nf,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-amx-transpose,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1) -> (d0, d1)> | |
#map1 = affine_map<(d0, d1) -> (d0)> | |
#map2 = affine_map<(d0, d1) -> (d1, d0)> | |
#map3 = affine_map<(d0, d1) -> ()> | |
#map4 = affine_map<(d0, d1) -> (d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_0 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%arg0: !stream.binding, %arg1: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256x256xi8>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
%2 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
return | |
} | |
} | |
} | |
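  // dispatch_0 packs the 256x256 i8 LHS with inner_tiles = [16, 2]: the M dimension is
  // tiled by 16 and the K (reduction) dimension by 2, producing the [M1=16, K1=128, M0=16, K0=2]
  // layout that linalg.mmt4d in dispatch_3 expects for its LHS.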
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_1 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%arg0: !stream.binding, %arg1: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256x256xi8>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
%2 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %2 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
return | |
} | |
} | |
} | |
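  // dispatch_1 packs the RHS with outer_dims_perm = [1, 0] and inner_dims_pos = [1, 0],
  // i.e. a tiled transpose into [N1=16, K1=128, N0=16, K0=2] order, so that dispatch_3's
  // mmt4d (which multiplies the LHS by a transposed RHS) reads both operands with the
  // K tiles innermost.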
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_2 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%arg0: !stream.binding, %arg1: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256x256xi32>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>> | |
%2 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi32>> -> tensor<256x256xi32> | |
%3 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %3 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %pack, %1, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>> | |
return | |
} | |
} | |
} | |
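  // dispatch_2 packs the 256x256 i32 accumulator into 16x16 tiles ([M1=16, N1=16, M0=16, N0=16])
  // so that dispatch_3 can update it in place through its readwrite binding.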
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_3 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%5 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%6 = linalg.mmt4d ins(%3, %4 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%5 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %6, %2, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> | |
return | |
} | |
} | |
} | |
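  // dispatch_3 is the tiled matmul kernel proper. linalg.mmt4d on these shapes computes
  //   acc[m1, n1, m0, n0] += sum over k1, k0 of lhs[m1, k1, m0, k0] * rhs[n1, k1, n0, k0]
  // with M1 = N1 = 16, K1 = 128, M0 = N0 = 16, K0 = 2, accumulating in i32 into the packed
  // accumulator produced by dispatch_2.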
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_4 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%arg0: !stream.binding, %arg1: !stream.binding) { | |
%c0_i32 = arith.constant 0 : i32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256x256xi8>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
%2 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%3 = tensor.empty() : tensor<256xi32> | |
%4 = linalg.fill ins(%c0_i32 : i32) outs(%3 : tensor<256xi32>) -> tensor<256xi32> | |
%5 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "reduction"]} ins(%2 : tensor<256x256xi8>) outs(%4 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%6 = arith.extsi %in : i8 to i32 | |
%7 = arith.addi %6, %out : i32 | |
linalg.yield %7 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %5, %1, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
return | |
} | |
} | |
} | |
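  // dispatch_4 reduces the sign-extended LHS over its second (reduction) dimension:
  //   row_sums[i] = sum over k of sext(lhs[i, k])
  // These per-row i32 sums feed the zero-point correction in dispatch_6.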
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_5 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%arg0: !stream.binding, %arg1: !stream.binding) { | |
%c0_i32 = arith.constant 0 : i32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256x256xi8>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
%2 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%3 = tensor.empty() : tensor<256xi32> | |
%4 = linalg.fill ins(%c0_i32 : i32) outs(%3 : tensor<256xi32>) -> tensor<256xi32> | |
%5 = linalg.generic {indexing_maps = [#map2, #map1], iterator_types = ["parallel", "reduction"]} ins(%2 : tensor<256x256xi8>) outs(%4 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%6 = arith.extsi %in : i8 to i32 | |
%7 = arith.addi %6, %out : i32 | |
linalg.yield %7 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %5, %1, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
return | |
} | |
} | |
} | |
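  // dispatch_5 is the same reduction applied to the RHS through the transposed indexing
  // map (d0, d1) -> (d1, d0), producing col_sums[j] = sum over k of sext(rhs[k, j]).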
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_6 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg0: i32, %arg1: i32, %arg2: !stream.binding, %arg3: !stream.binding, %arg4: !stream.binding, %arg5: !stream.binding) { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x16x16x16xi32>> | |
%1 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256xi32>> | |
%2 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256xi32>> | |
%3 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<256x256xi32>> | |
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%5 = flow.dispatch.tensor.load %1, offsets = [0], sizes = [256], strides = [1] : !flow.dispatch.tensor<readonly:tensor<256xi32>> -> tensor<256xi32> | |
%6 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [256], strides = [1] : !flow.dispatch.tensor<readonly:tensor<256xi32>> -> tensor<256xi32> | |
%7 = arith.muli %arg0, %arg1 : i32 | |
%8 = tensor.empty() : tensor<256x256xi32> | |
%9 = arith.muli %7, %c256_i32 : i32 | |
%unpack = tensor.unpack %4 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %8 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%10 = linalg.generic {indexing_maps = [#map, #map1, #map3, #map4, #map3, #map3, #map], iterator_types = ["parallel", "parallel"]} ins(%unpack, %5, %arg1, %6, %arg0, %9 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%8 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_0: i32, %in_1: i32, %in_2: i32, %in_3: i32, %in_4: i32, %out: i32): | |
%11 = arith.muli %in_0, %in_1 : i32 | |
%12 = arith.subi %in, %11 : i32 | |
%13 = arith.muli %in_2, %in_3 : i32 | |
%14 = arith.subi %12, %13 : i32 | |
%15 = arith.addi %14, %in_4 : i32 | |
linalg.yield %15 : i32 | |
} -> tensor<256x256xi32> | |
flow.dispatch.tensor.store %10, %3, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : tensor<256x256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256x256xi32>> | |
return | |
} | |
} | |
} | |
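  // dispatch_6 unpacks the 16x16x16x16 accumulator back to 256x256 and applies the
  // zero-point correction that makes the plain i8 matmul equal to quantized_matmul:
  //   out[i, j] = acc[i, j] - row_sums[i] * rhs_zp - col_sums[j] * lhs_zp + 256 * lhs_zp * rhs_zp
  // where 256 is the reduction size K and the last term is %9 = (%arg0 * %arg1) * 256
  // computed above.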
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c1024 = arith.constant 1024 : index | |
%c262144 = arith.constant 262144 : index | |
%c65536 = arith.constant 65536 : index | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%element_type_i8 = hal.element_type<i8> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c256, %c256]) type(%element_type_i8) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<256x256xi8> in !stream.resource<external>{%c65536} | |
%1 = stream.async.transfer %0 : !stream.resource<external>{%c65536} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c65536} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c256, %c256]) type(%element_type_i8) encoding(%dense_row_major) | |
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<256x256xi8> in !stream.resource<external>{%c65536} | |
%3 = stream.async.transfer %2 : !stream.resource<external>{%c65536} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c65536} | |
%element_type_i32 = hal.element_type<i32> : i32 | |
hal.buffer_view.assert<%arg4 : !hal.buffer_view> message("input4") shape([%c256, %c256]) type(%element_type_i32) encoding(%dense_row_major) | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg4 : !hal.buffer_view -> tensor<256x256xi32> in !stream.resource<external>{%c262144} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%c262144} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c262144} | |
%6 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_0::@quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%1[%c0 to %c65536 for %c65536]) : (!stream.resource<*>{%c65536}) -> !stream.resource<*>{%c65536} | |
%7 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_1::@quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%3[%c0 to %c65536 for %c65536]) : (!stream.resource<*>{%c65536}) -> !stream.resource<*>{%c65536} | |
%8 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_2::@quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%5[%c0 to %c262144 for %c262144]) : (!stream.resource<*>{%c262144}) -> !stream.resource<*>{%c262144} | |
%9 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_3::@quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%6[%c0 to %c65536 for %c65536], %7[%c0 to %c65536 for %c65536], %8[%c0 to %c262144 for %c262144]) : (!stream.resource<*>{%c65536}, !stream.resource<*>{%c65536}, !stream.resource<*>{%c262144}) -> %8{%c262144} | |
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_4::@quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%1[%c0 to %c65536 for %c65536]) : (!stream.resource<*>{%c65536}) -> !stream.resource<*>{%c1024} | |
%11 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_5::@quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%3[%c0 to %c65536 for %c65536]) : (!stream.resource<*>{%c65536}) -> !stream.resource<*>{%c1024} | |
%12 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_6::@quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg2, %arg3, %9[%c0 to %c262144 for %c262144], %10[%c0 to %c1024 for %c1024], %11[%c0 to %c1024 for %c1024]) : (i32, i32, !stream.resource<*>{%c262144}, !stream.resource<*>{%c1024}, !stream.resource<*>{%c1024}) -> !stream.resource<*>{%c262144} | |
%13 = stream.async.transfer %12 : !stream.resource<*>{%c262144} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c262144} | |
%14 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %13 : tensor<256x256xi32> in !stream.resource<external>{%c262144} -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
} | |
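The correction applied in dispatch_6 is exact rather than an approximation: expanding the zero-point-adjusted product and pulling the constants out of the reduction gives

    \sum_{k} (a_{ik} - z_a)(b_{kj} - z_b)
      = \sum_{k} a_{ik} b_{kj} - z_b \sum_{k} a_{ik} - z_a \sum_{k} b_{kj} + K \, z_a z_b

which maps term by term onto dispatch_3 (the raw i8 matmul), dispatch_4 and dispatch_5 (the two partial sums), and the folded constant 256 * lhs_zp * rhs_zp (K = 256 here).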
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-amx-fp8,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,-movrs,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,+evex512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-sha512,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,+gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,-nf,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-amx-transpose,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1) -> (d0, d1)> | |
#map1 = affine_map<(d0, d1) -> (d0)> | |
#map2 = affine_map<(d0, d1) -> (d1, d0)> | |
#map3 = affine_map<(d0, d1) -> ()> | |
#map4 = affine_map<(d0, d1) -> (d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_0 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%arg0: !stream.binding, %arg1: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256x256xi8>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
%2 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
return | |
} | |
} | |
} | |
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_1 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%arg0: !stream.binding, %arg1: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256x256xi8>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
%2 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %2 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
return | |
} | |
} | |
} | |
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_2 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%arg0: !stream.binding, %arg1: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256x256xi32>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>> | |
%2 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi32>> -> tensor<256x256xi32> | |
%3 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %3 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %pack, %1, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>> | |
return | |
} | |
} | |
} | |
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_3 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%5 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%6 = linalg.mmt4d ins(%3, %4 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%5 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %6, %2, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> | |
return | |
} | |
} | |
} | |
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_4 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%arg0: !stream.binding, %arg1: !stream.binding) { | |
%c0_i32 = arith.constant 0 : i32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256x256xi8>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
%2 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%3 = tensor.empty() : tensor<256xi32> | |
%4 = linalg.fill ins(%c0_i32 : i32) outs(%3 : tensor<256xi32>) -> tensor<256xi32> | |
%5 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "reduction"]} ins(%2 : tensor<256x256xi8>) outs(%4 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%6 = arith.extsi %in : i8 to i32 | |
%7 = arith.addi %6, %out : i32 | |
linalg.yield %7 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %5, %1, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
return | |
} | |
} | |
} | |
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_5 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%arg0: !stream.binding, %arg1: !stream.binding) { | |
%c0_i32 = arith.constant 0 : i32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256x256xi8>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
%2 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%3 = tensor.empty() : tensor<256xi32> | |
%4 = linalg.fill ins(%c0_i32 : i32) outs(%3 : tensor<256xi32>) -> tensor<256xi32> | |
%5 = linalg.generic {indexing_maps = [#map2, #map1], iterator_types = ["parallel", "reduction"]} ins(%2 : tensor<256x256xi8>) outs(%4 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%6 = arith.extsi %in : i8 to i32 | |
%7 = arith.addi %6, %out : i32 | |
linalg.yield %7 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %5, %1, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
return | |
} | |
} | |
} | |
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_6 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg0: i32, %arg1: i32, %arg2: !stream.binding, %arg3: !stream.binding, %arg4: !stream.binding, %arg5: !stream.binding) { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x16x16x16xi32>> | |
%1 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256xi32>> | |
%2 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256xi32>> | |
%3 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<256x256xi32>> | |
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%5 = flow.dispatch.tensor.load %1, offsets = [0], sizes = [256], strides = [1] : !flow.dispatch.tensor<readonly:tensor<256xi32>> -> tensor<256xi32> | |
%6 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [256], strides = [1] : !flow.dispatch.tensor<readonly:tensor<256xi32>> -> tensor<256xi32> | |
%7 = arith.muli %arg0, %arg1 : i32 | |
%8 = tensor.empty() : tensor<256x256xi32> | |
%9 = arith.muli %7, %c256_i32 : i32 | |
%unpack = tensor.unpack %4 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %8 : tensor<16x16x16x16xi32> -> tensor<256x256xi32> | |
%10 = linalg.generic {indexing_maps = [#map, #map1, #map3, #map4, #map3, #map3, #map], iterator_types = ["parallel", "parallel"]} ins(%unpack, %5, %arg1, %6, %arg0, %9 : tensor<256x256xi32>, tensor<256xi32>, i32, tensor<256xi32>, i32, i32) outs(%8 : tensor<256x256xi32>) { | |
^bb0(%in: i32, %in_0: i32, %in_1: i32, %in_2: i32, %in_3: i32, %in_4: i32, %out: i32): | |
%11 = arith.muli %in_0, %in_1 : i32 | |
%12 = arith.subi %in, %11 : i32 | |
%13 = arith.muli %in_2, %in_3 : i32 | |
%14 = arith.subi %12, %13 : i32 | |
%15 = arith.addi %14, %in_4 : i32 | |
linalg.yield %15 : i32 | |
} -> tensor<256x256xi32> | |
flow.dispatch.tensor.store %10, %3, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : tensor<256x256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256x256xi32>> | |
return | |
} | |
} | |
} | |
util.func public @quantized_matmul_neither_zp_0_dynamic(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: i32, %arg3: i32, %arg4: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @quantized_matmul_neither_zp_0_dynamic(%input0: tensor<256x256xi8>, %input1: tensor<256x256xi8>, %input2: i32, %input3: i32, %input4: tensor<256x256xi32>) -> (%output0: tensor<256x256xi32>)"}} { | |
%c1024 = arith.constant 1024 : index | |
%c262144 = arith.constant 262144 : index | |
%c65536 = arith.constant 65536 : index | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%element_type_i8 = hal.element_type<i8> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c256, %c256]) type(%element_type_i8) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<256x256xi8> in !stream.resource<external>{%c65536} | |
%1 = stream.async.transfer %0 : !stream.resource<external>{%c65536} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c65536} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c256, %c256]) type(%element_type_i8) encoding(%dense_row_major) | |
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<256x256xi8> in !stream.resource<external>{%c65536} | |
%3 = stream.async.transfer %2 : !stream.resource<external>{%c65536} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c65536} | |
%element_type_i32 = hal.element_type<i32> : i32 | |
hal.buffer_view.assert<%arg4 : !hal.buffer_view> message("input4") shape([%c256, %c256]) type(%element_type_i32) encoding(%dense_row_major) | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg4 : !hal.buffer_view -> tensor<256x256xi32> in !stream.resource<external>{%c262144} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%c262144} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c262144} | |
%6 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_0::@quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%1[%c0 to %c65536 for %c65536]) : (!stream.resource<*>{%c65536}) -> !stream.resource<*>{%c65536} | |
%7 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_1::@quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%3[%c0 to %c65536 for %c65536]) : (!stream.resource<*>{%c65536}) -> !stream.resource<*>{%c65536} | |
%8 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_2::@quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%5[%c0 to %c262144 for %c262144]) : (!stream.resource<*>{%c262144}) -> !stream.resource<*>{%c262144} | |
%9 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_3::@quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%6[%c0 to %c65536 for %c65536], %7[%c0 to %c65536 for %c65536], %8[%c0 to %c262144 for %c262144]) : (!stream.resource<*>{%c65536}, !stream.resource<*>{%c65536}, !stream.resource<*>{%c262144}) -> %8{%c262144} | |
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_4::@quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%1[%c0 to %c65536 for %c65536]) : (!stream.resource<*>{%c65536}) -> !stream.resource<*>{%c1024} | |
%11 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_5::@quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%3[%c0 to %c65536 for %c65536]) : (!stream.resource<*>{%c65536}) -> !stream.resource<*>{%c1024} | |
%12 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @quantized_matmul_neither_zp_0_dynamic_dispatch_6::@quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg2, %arg3, %9[%c0 to %c262144 for %c262144], %10[%c0 to %c1024 for %c1024], %11[%c0 to %c1024 for %c1024]) : (i32, i32, !stream.resource<*>{%c262144}, !stream.resource<*>{%c1024}, !stream.resource<*>{%c1024}) -> !stream.resource<*>{%c262144} | |
%13 = stream.async.transfer %12 : !stream.resource<*>{%c262144} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c262144} | |
%14 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %13 : tensor<256x256xi32> in !stream.resource<external>{%c262144} -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After IPO (iree-util-ipo) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-amx-fp8,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,-movrs,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,+evex512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-sha512,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,+gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,-nf,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-amx-transpose,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1) -> (d0, d1)> | |
#map1 = affine_map<(d0, d1) -> (d0)> | |
#map2 = affine_map<(d0, d1) -> (d1, d0)> | |
#map3 = affine_map<(d0, d1) -> ()> | |
#map4 = affine_map<(d0, d1) -> (d1)> | |
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_local | |
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_0 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_0_pack_i8(%arg0: !stream.binding, %arg1: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256x256xi8>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
%2 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
return | |
} | |
} | |
} | |
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_1 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_1_pack_i8(%arg0: !stream.binding, %arg1: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256x256xi8>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
%2 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%3 = tensor.empty() : tensor<16x128x16x2xi8> | |
%pack = tensor.pack %2 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [16, 2] into %3 : tensor<256x256xi8> -> tensor<16x128x16x2xi8> | |
flow.dispatch.tensor.store %pack, %1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : tensor<16x128x16x2xi8> -> !flow.dispatch.tensor<writeonly:tensor<16x128x16x2xi8>> | |
return | |
} | |
} | |
} | |
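  // dispatch_2: tensor.pack of the 256x256 i32 accumulator into a 16x16x16x16 tile layout (inner_tiles = [16, 16]).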
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_2 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_2_pack_i32(%arg0: !stream.binding, %arg1: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256x256xi32>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>> | |
%2 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi32>> -> tensor<256x256xi32> | |
%3 = tensor.empty() : tensor<16x16x16x16xi32> | |
%pack = tensor.pack %2 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %3 : tensor<256x256xi32> -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %pack, %1, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<writeonly:tensor<16x16x16x16xi32>> | |
return | |
} | |
} | |
} | |
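  // dispatch_3: linalg.mmt4d on the two packed i8 operands, accumulating into the packed i32 tensor (M0 = N0 = 16, K0 = 2).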
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_3 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_3_mmt4d_16x16x128x16x16x2_i8xi8xi32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0, 0], sizes = [16, 128, 16, 2], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<16x128x16x2xi8>> -> tensor<16x128x16x2xi8> | |
%5 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> -> tensor<16x16x16x16xi32> | |
%6 = linalg.mmt4d ins(%3, %4 : tensor<16x128x16x2xi8>, tensor<16x128x16x2xi8>) outs(%5 : tensor<16x16x16x16xi32>) -> tensor<16x16x16x16xi32> | |
flow.dispatch.tensor.store %6, %2, offsets = [0, 0, 0, 0], sizes = [16, 16, 16, 16], strides = [1, 1, 1, 1] : tensor<16x16x16x16xi32> -> !flow.dispatch.tensor<readwrite:tensor<16x16x16x16xi32>> | |
return | |
} | |
} | |
} | |
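  // dispatch_4: row sums of a 256x256 i8 operand (reduction over d1 via #map/#map1), sign-extended to i32; presumably one of the zero-point correction terms of the quantized matmul.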
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_4 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_4_generic_256x256_i8xi32(%arg0: !stream.binding, %arg1: !stream.binding) { | |
%c0_i32 = arith.constant 0 : i32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256x256xi8>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
%2 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%3 = tensor.empty() : tensor<256xi32> | |
%4 = linalg.fill ins(%c0_i32 : i32) outs(%3 : tensor<256xi32>) -> tensor<256xi32> | |
%5 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "reduction"]} ins(%2 : tensor<256x256xi8>) outs(%4 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%6 = arith.extsi %in : i8 to i32 | |
%7 = arith.addi %6, %out : i32 | |
linalg.yield %7 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %5, %1, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
return | |
} | |
} | |
} | |
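  // dispatch_5: column sums of a 256x256 i8 operand (input read through the transposed map #map2), sign-extended to i32; presumably the other zero-point correction term.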
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_5 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_5_generic_256x256_i8xi32(%arg0: !stream.binding, %arg1: !stream.binding) { | |
%c0_i32 = arith.constant 0 : i32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<256x256xi8>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
%2 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [256, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x256xi8>> -> tensor<256x256xi8> | |
%3 = tensor.empty() : tensor<256xi32> | |
%4 = linalg.fill ins(%c0_i32 : i32) outs(%3 : tensor<256xi32>) -> tensor<256xi32> | |
%5 = linalg.generic {indexing_maps = [#map2, #map1], iterator_types = ["parallel", "reduction"]} ins(%2 : tensor<256x256xi8>) outs(%4 : tensor<256xi32>) { | |
^bb0(%in: i8, %out: i32): | |
%6 = arith.extsi %in : i8 to i32 | |
%7 = arith.addi %6, %out : i32 | |
linalg.yield %7 : i32 | |
} -> tensor<256xi32> | |
flow.dispatch.tensor.store %5, %1, offsets = [0], sizes = [256], strides = [1] : tensor<256xi32> -> !flow.dispatch.tensor<writeonly:tensor<256xi32>> | |
return | |
} | |
} | |
} | |
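  // dispatch_6: takes the two zero points plus four bindings (packed result, the two sum vectors, and the output); judging from its name it unpacks the 16x16x16x16 accumulator back to 256x256 and applies the elementwise zero-point correction. Its body is cut off at this point in the dump.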
stream.executable private @quantized_matmul_neither_zp_0_dynamic_dispatch_6 { | |
stream.executable.export public @quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @quantized_matmul_neither_zp_0_dynamic_dispatch_6_unpack_elementwise_256x256_i32(%arg0: i32, %arg1: i32, %arg2: !stream.binding, %arg3: !stream.binding, %arg4: !stream.binding, %arg5: !stream.binding) { | |
%c256_i32 = arith.constant 256 : i32 | |
%c0 = arith.constant 0 : index | |