@pashu123
Created April 24, 2024 12:58
This file has been truncated; the full IR dump continues beyond what is shown here.
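
The dump below follows a single tensor.pack program through IREE's compilation pipeline, printing the whole module after each pass. As a rough guide to reproducing a dump of this form (an assumption, not stated in the gist itself), this is what iree-compile emits to stderr when MLIR's standard IR-printing flag is enabled, e.g.:

  iree-compile pack.mlir --iree-hal-target-backends=llvm-cpu --iree-llvmcpu-target-cpu=znver4 --mlir-print-ir-after-all -o pack.vmfb 2> pack_dump.mlir

The input file name, output name, and exact flag set here are illustrative only.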
// -----// IR Dump After AssignTargetDevicesPass (iree-hal-assign-target-devices) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
func.func @pack(%arg0: tensor<?x8640x3200xf16>) -> tensor<?x540x3200x16x1xf16> {
%c0 = arith.constant 0 : index
%c32_i64 = arith.constant 32 : i64
%dim = tensor.dim %arg0, %c0 : tensor<?x8640x3200xf16>
%0 = tensor.empty(%dim) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %arg0 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %0 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
return %pack : tensor<?x540x3200x16x1xf16>
}
}
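
For reference, the tensor.pack above tiles dimension 1 (8640) by 16 and dimension 2 (3200) by 1, so the result shape ?x540x3200x16x1 follows from ceil-dividing each packed dimension by its inner tile and appending the tile sizes. A minimal Python sketch of that shape arithmetic (purely illustrative; the helper name is made up):

import math

def packed_shape(src_shape, inner_dims_pos, inner_tiles, outer_dims_perm):
    # Outer dims: each packed dim is ceil-divided by its tile size.
    outer = list(src_shape)
    for dim, tile in zip(inner_dims_pos, inner_tiles):
        outer[dim] = math.ceil(outer[dim] / tile)
    # Apply the outer permutation, then append the inner tile dims.
    outer = [outer[i] for i in outer_dims_perm]
    return outer + list(inner_tiles)

# ? (dynamic) x 8640 x 3200, tiled 16x1 on dims 1 and 2 -> ? x 540 x 3200 x 16 x 1
print(packed_shape(["?", 8640, 3200], inner_dims_pos=[1, 2],
                   inner_tiles=[16, 1], outer_dims_perm=[0, 1, 2]))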
// -----// IR Dump After AutoInputConversionPipeline (iree-auto-input-conversion) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
func.func @pack(%arg0: tensor<?x8640x3200xf16>) -> tensor<?x540x3200x16x1xf16> {
%c0 = arith.constant 0 : index
%c32_i64 = arith.constant 32 : i64
%dim = tensor.dim %arg0, %c0 : tensor<?x8640x3200xf16>
%0 = tensor.empty(%dim) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %arg0 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %0 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
return %pack : tensor<?x540x3200x16x1xf16>
}
}
// -----// IR Dump After IREEImportPublic (iree-import-public) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
util.func public @pack(%arg0: tensor<?x8640x3200xf16>) -> tensor<?x540x3200x16x1xf16> {
%c0 = arith.constant 0 : index
%c32_i64 = arith.constant 32 : i64
%dim = tensor.dim %arg0, %c0 : tensor<?x8640x3200xf16>
%0 = tensor.empty(%dim) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %arg0 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %0 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
util.return %pack : tensor<?x540x3200x16x1xf16>
}
}
// -----// IR Dump After ImportMLProgram (iree-import-ml-program) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
util.func public @pack(%arg0: tensor<?x8640x3200xf16>) -> tensor<?x540x3200x16x1xf16> {
%c0 = arith.constant 0 : index
%c32_i64 = arith.constant 32 : i64
%dim = tensor.dim %arg0, %c0 : tensor<?x8640x3200xf16>
%0 = tensor.empty(%dim) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %arg0 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %0 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
util.return %pack : tensor<?x540x3200x16x1xf16>
}
}
// -----// IR Dump After SanitizeModuleNames (iree-sanitize-module-names) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
util.func public @pack(%arg0: tensor<?x8640x3200xf16>) -> tensor<?x540x3200x16x1xf16> {
%c0 = arith.constant 0 : index
%c32_i64 = arith.constant 32 : i64
%dim = tensor.dim %arg0, %c0 : tensor<?x8640x3200xf16>
%0 = tensor.empty(%dim) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %arg0 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %0 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
util.return %pack : tensor<?x540x3200x16x1xf16>
}
}
// -----// IR Dump After ConvertMeshToFlow (iree-convert-mesh-to-flow) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
util.func public @pack(%arg0: tensor<?x8640x3200xf16>) -> tensor<?x540x3200x16x1xf16> {
%c0 = arith.constant 0 : index
%dim = tensor.dim %arg0, %c0 : tensor<?x8640x3200xf16>
%0 = tensor.empty(%dim) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %arg0 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %0 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
util.return %pack : tensor<?x540x3200x16x1xf16>
}
}
// -----// IR Dump After mlir::iree_compiler::IREE::ABI::ConvertStreamableOpsPass (iree-abi-convert-streamable-ops) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
util.func public @pack(%arg0: tensor<?x8640x3200xf16>) -> tensor<?x540x3200x16x1xf16> {
%c0 = arith.constant 0 : index
%dim = tensor.dim %arg0, %c0 : tensor<?x8640x3200xf16>
%0 = tensor.empty(%dim) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %arg0 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %0 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
util.return %pack : tensor<?x540x3200x16x1xf16>
}
}
// -----// IR Dump After mlir::iree_compiler::IREE::ABI::WrapEntryPointsPass (iree-abi-wrap-entry-points) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = util.call @_pack(%1) : (tensor<?x8640x3200xf16>) -> tensor<?x540x3200x16x1xf16>
%c0 = arith.constant 0 : index
%dim = tensor.dim %2, %c0 : tensor<?x540x3200x16x1xf16>
%3 = hal.tensor.export %2 "output0" : tensor<?x540x3200x16x1xf16>{%dim} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
util.func private @_pack(%arg0: tensor<?x8640x3200xf16>) -> tensor<?x540x3200x16x1xf16> {
%c0 = arith.constant 0 : index
%dim = tensor.dim %arg0, %c0 : tensor<?x8640x3200xf16>
%0 = tensor.empty(%dim) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %arg0 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %0 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
util.return %pack : tensor<?x540x3200x16x1xf16>
}
}
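
After iree-abi-wrap-entry-points the original body lives in a private @_pack, and the public @pack now speaks the runtime ABI: it takes a !hal.buffer_view, reads the dynamic dimension, imports the buffer as a tensor, calls @_pack, and exports the result back to a buffer view. A hedged sketch of how a module compiled with this signature could be invoked from the IREE Python runtime bindings (the .vmfb file name, driver choice, and input values are assumptions, not taken from the gist):

import numpy as np
import iree.runtime as ireert

# pack.vmfb is assumed to be the artifact produced by compiling this module
# for the llvm-cpu target; "local-task" is IREE's multi-threaded CPU driver.
module = ireert.load_vm_flatbuffer_file("pack.vmfb", driver="local-task")

# The public @pack takes one dynamically-sized f16 tensor of shape ?x8640x3200.
x = np.zeros((2, 8640, 3200), dtype=np.float16)
result = module.pack(x)

# Expected packed shape for this input: 2x540x3200x16x1.
print(np.asarray(result).shape)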
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func private @_pack(%arg0: tensor<?x8640x3200xf16>) -> tensor<?x540x3200x16x1xf16> {
%c0 = arith.constant 0 : index
%dim = tensor.dim %arg0, %c0 : tensor<?x8640x3200xf16>
%0 = tensor.empty(%dim) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %arg0 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %0 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
util.return %pack : tensor<?x540x3200x16x1xf16>
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = util.call @_pack(%1) : (tensor<?x8640x3200xf16>) -> tensor<?x540x3200x16x1xf16>
%dim = tensor.dim %2, %c0 : tensor<?x540x3200x16x1xf16>
%3 = hal.tensor.export %2 "output0" : tensor<?x540x3200x16x1xf16>{%dim} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %1 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
%3 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After Inliner (inline) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %1 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
%3 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %1 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
%3 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %1 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
%3 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After SymbolDCE (symbol-dce) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %1 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
%3 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
}
// -----// IR Dump After DemoteF64ToF32 (iree-util-demote-f64-to-f32) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %1 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
%3 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
}
// -----// IR Dump After RemoveZeroExtentTensors (iree-global-opt-remove-zero-extent-tensors) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %1 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
%3 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After DetachElementwiseFromNamedOps (iree-global-opt-detach-elementwise-from-named-ops) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %1 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
%3 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After LinalgNamedOpConversionPass (linalg-named-op-conversion) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %1 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
%3 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After Convert1X1FilterConv2DToMatmul (iree-global-opt-convert-1x1-filter-conv2d-to-matmul) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %1 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
%3 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After EraseUnusedLinalgOperands (iree-global-opt-erase-unused-linalg-operands) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %1 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
%3 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
}
// -----// IR Dump After ExpandTensorShapes (iree-global-opt-expand-tensor-shapes) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %1 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
%3 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
}
// -----// IR Dump After ConvertElementwiseToLinalgPass (convert-elementwise-to-linalg) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %1 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
%3 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After RaiseSpecialOps (iree-global-opt-raise-special-ops) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %1 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
%3 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After DecomposeConcat (iree-global-opt-decompose-concat) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %1 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
%3 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After GeneralizeLinalgNamedOps (iree-global-opt-generalize-linalg-named-ops) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %1 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
%3 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After FoldUnitExtentDimsPass (iree-flow-fold-unit-extent-dims) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %1 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
%3 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After FuseDequantizationMatmul (iree-global-opt-fuse-dequantization-matmul) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %1 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
%3 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %1 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
%3 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %1 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
%3 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %1 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
%3 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %1 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
%3 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After SetEncoding (iree-global-opt-set-encoding) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %1 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
%3 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
}
// -----// IR Dump After CPUMaterializeUpperBoundTileSize (iree-codegen-cpu-materialize-upper-bound-tile-size) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %1 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
%3 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After CPUMaterializeEncoding (iree-codegen-cpu-materialize-encoding) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %1 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
%3 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After MaterializeHomogeneousEncodings (iree-global-opt-materialize-homogeneous-encodings) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %1 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
%3 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %1 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
%3 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
}
// -----// IR Dump After CSE (cse) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %1 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
%3 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
}
// -----// IR Dump After SimplifyPackUnpack (iree-global-opt-simplify-pack-unpack) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %1 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
%3 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
}
// -----// IR Dump After DataLayoutPropagation (iree-global-opt-data-layout-propagation) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %1 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
%3 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After GeneralizeLinalgNamedOps (iree-global-opt-generalize-linalg-named-ops) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %1 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
%3 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After GlobalLoopInvariantCodeMotion (iree-global-opt-loop-invariant-code-motion) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %1 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
%3 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %1 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
%3 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %1 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
%3 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %1 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
%3 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %1 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
%3 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
}
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %1 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
%3 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
}
// -----// IR Dump After IPO (iree-util-ipo) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %1 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
%3 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %1 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
%3 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
}
// -----// IR Dump After CSE (cse) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %1 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
%3 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
}
// -----// IR Dump After HoistIntoGlobals (iree-util-hoist-into-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %1 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
%3 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
}
// -----// IR Dump After JitGlobals (iree-consteval-jit-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %1 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
%3 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %1 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
%3 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %1 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
%3 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After RaiseSpecialOps (iree-global-opt-raise-special-ops) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %1 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
%3 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After VerifyInputLegalityPass (iree-verify-input-legality) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %1 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
%3 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
}
// -----// IR Dump After InjectTensorTracingPass (iree-flow-inject-tensor-tracing) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %1 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
%3 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After TensorPadToTensorInsertSlicePass (iree-flow-tensor-pad-to-tensor-insert-slice) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %1 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
%3 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
}
// -----// IR Dump After InterchangeGenericOpsPass (iree-flow-interchange-generic-ops) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %1 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
%3 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After ResolveShapedTypeResultDims (resolve-shaped-type-result-dims) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %1 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
%3 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %1 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
%3 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %1 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
%3 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After ElementwiseOpFusionPass (iree-flow-elementwise-op-fusion) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %1 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
%3 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %1 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
%3 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %1 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
%3 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After BubbleUpExpandShapesPass (iree-flow-bubble-up-expand-shapes) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %1 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
%3 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %1 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
%3 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %1 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
%3 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After ElementwiseOpFusionPass (iree-flow-elementwise-op-fusion) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %1 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
%3 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %1 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
%3 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %1 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
%3 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After FusionOfTensorOpsPass (iree-flow-fusion-of-tensor-ops) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %1 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
%3 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %1 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
%3 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %1 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
%3 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After SplitReductionPass (iree-flow-split-reduction-ops) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %1 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
%3 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After InterchangeGenericOpsPass (iree-flow-interchange-generic-ops) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %1 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
%3 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After FormScalarDispatchesPass (iree-flow-form-scalar-dispatches) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %1 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
%3 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After FormDispatchRegionsPass (iree-flow-form-dispatch-regions) //----- //
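// Note: first change to the function body in this stretch of dumps: the tensor.pack is wrapped in a flow.dispatch.region whose result shape is tied to the dynamic dim %0; the %c0 constant introduced here appears to be unused.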
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%c0 = arith.constant 0 : index
%3 = flow.dispatch.region -> (tensor<?x540x3200x16x1xf16>{%0}) {
%pack = tensor.pack %1 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.return %pack : tensor<?x540x3200x16x1xf16>
}
%4 = hal.tensor.export %3 "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After CloneProducersIntoDispatchRegionsPass (iree-flow-clone-producers-into-dispatch-regions) //----- //
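// Note: the tensor.empty producer is cloned into the dispatch region (%5) so the region body only depends on %1 and %0; the outer %2 is left dead.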
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%c0 = arith.constant 0 : index
%3 = flow.dispatch.region -> (tensor<?x540x3200x16x1xf16>{%0}) {
%5 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %1 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %5 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.return %pack : tensor<?x540x3200x16x1xf16>
}
%4 = hal.tensor.export %3 "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After CollapseDimensionsPass (iree-flow-collapse-dimensions) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%c0 = arith.constant 0 : index
%3 = flow.dispatch.region -> (tensor<?x540x3200x16x1xf16>{%0}) {
%5 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %1 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %5 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.return %pack : tensor<?x540x3200x16x1xf16>
}
%4 = hal.tensor.export %3 "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After FormDispatchWorkgroupsPass (iree-flow-form-dispatch-workgroups) //----- //
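// Note: the dispatch region is rewritten as flow.dispatch.workgroups, taking the dynamic dim as an explicit workload operand; inputs/outputs become !flow.dispatch.tensor bindings accessed via flow.dispatch.tensor.load/store, and the workgroup count is deferred through flow.dispatch.workgroup_count_from_slice.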
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = flow.dispatch.workgroups[%0](%1, %0) : (tensor<?x8640x3200xf16>{%0}, index) -> tensor<?x540x3200x16x1xf16>{%0} =
(%arg1: !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>, %arg2: index, %arg3: !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>) {
%4 = flow.dispatch.workload.ordinal %arg2, 0 : index
%5 = flow.dispatch.tensor.load %arg1, offsets = [0, 0, 0], sizes = [%4, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%4} -> tensor<?x8640x3200xf16>
%6 = tensor.empty(%4) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %6 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %arg3, offsets = [0, 0, 0, 0, 0], sizes = [%4, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%4}
flow.return
} count(%arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg1
flow.return %x, %y, %z : index, index, index
}
%3 = hal.tensor.export %2 "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After CaptureDynamicDimsPass (iree-flow-capture-dynamic-dims) //----- //
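// Note: flow.dispatch.tie_shape ops record that the dynamic dimension operand %arg2 describes the shapes of both dispatch tensor bindings inside the region.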
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = flow.dispatch.workgroups[%0](%1, %0) : (tensor<?x8640x3200xf16>{%0}, index) -> tensor<?x540x3200x16x1xf16>{%0} =
(%arg1: !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>, %arg2: index, %arg3: !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>) {
%4 = flow.dispatch.tie_shape %arg1 : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%arg2}
%5 = flow.dispatch.tie_shape %arg3 : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%arg2}
%6 = flow.dispatch.workload.ordinal %arg2, 0 : index
%7 = flow.dispatch.tensor.load %4, offsets = [0, 0, 0], sizes = [%6, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%6} -> tensor<?x8640x3200xf16>
%8 = tensor.empty(%6) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %7 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %8 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %5, offsets = [0, 0, 0, 0, 0], sizes = [%6, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6}
flow.return
} count(%arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg1
flow.return %x, %y, %z : index, index, index
}
%3 = hal.tensor.export %2 "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
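// Note: canonicalization here only reorders the region body so the tie_shape ops consume the workload ordinal (%4) instead of the raw block argument.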
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = flow.dispatch.workgroups[%0](%1, %0) : (tensor<?x8640x3200xf16>{%0}, index) -> tensor<?x540x3200x16x1xf16>{%0} =
(%arg1: !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>, %arg2: index, %arg3: !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>) {
%4 = flow.dispatch.workload.ordinal %arg2, 0 : index
%5 = flow.dispatch.tie_shape %arg1 : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%4}
%6 = flow.dispatch.tie_shape %arg3 : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%4}
%7 = flow.dispatch.tensor.load %5, offsets = [0, 0, 0], sizes = [%4, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%4} -> tensor<?x8640x3200xf16>
%8 = tensor.empty(%4) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %7 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %8 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %6, offsets = [0, 0, 0, 0, 0], sizes = [%4, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%4}
flow.return
} count(%arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg1
flow.return %x, %y, %z : index, index, index
}
%3 = hal.tensor.export %2 "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = flow.dispatch.workgroups[%0](%1, %0) : (tensor<?x8640x3200xf16>{%0}, index) -> tensor<?x540x3200x16x1xf16>{%0} =
(%arg1: !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>, %arg2: index, %arg3: !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>) {
%4 = flow.dispatch.workload.ordinal %arg2, 0 : index
%5 = flow.dispatch.tie_shape %arg1 : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%4}
%6 = flow.dispatch.tie_shape %arg3 : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%4}
%7 = flow.dispatch.tensor.load %5, offsets = [0, 0, 0], sizes = [%4, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%4} -> tensor<?x8640x3200xf16>
%8 = tensor.empty(%4) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %7 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %8 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %6, offsets = [0, 0, 0, 0, 0], sizes = [%4, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%4}
flow.return
} count(%arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg1
flow.return %x, %y, %z : index, index, index
}
%3 = hal.tensor.export %2 "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After InitializeEmptyTensorsPass (iree-flow-initialize-empty-tensors) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = flow.dispatch.workgroups[%0](%1, %0) : (tensor<?x8640x3200xf16>{%0}, index) -> tensor<?x540x3200x16x1xf16>{%0} =
(%arg1: !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>, %arg2: index, %arg3: !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>) {
%4 = flow.dispatch.workload.ordinal %arg2, 0 : index
%5 = flow.dispatch.tie_shape %arg1 : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%4}
%6 = flow.dispatch.tie_shape %arg3 : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%4}
%7 = flow.dispatch.tensor.load %5, offsets = [0, 0, 0], sizes = [%4, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%4} -> tensor<?x8640x3200xf16>
%8 = tensor.empty(%4) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %7 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %8 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %6, offsets = [0, 0, 0, 0, 0], sizes = [%4, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%4}
flow.return
} count(%arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg1
flow.return %x, %y, %z : index, index, index
}
%3 = hal.tensor.export %2 "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After OutlineDispatchExternsPass (iree-flow-outline-dispatch-externs) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = flow.dispatch.workgroups[%0](%1, %0) : (tensor<?x8640x3200xf16>{%0}, index) -> tensor<?x540x3200x16x1xf16>{%0} =
(%arg1: !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>, %arg2: index, %arg3: !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>) {
%4 = flow.dispatch.workload.ordinal %arg2, 0 : index
%5 = flow.dispatch.tie_shape %arg1 : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%4}
%6 = flow.dispatch.tie_shape %arg3 : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%4}
%7 = flow.dispatch.tensor.load %5, offsets = [0, 0, 0], sizes = [%4, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%4} -> tensor<?x8640x3200xf16>
%8 = tensor.empty(%4) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %7 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %8 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %6, offsets = [0, 0, 0, 0, 0], sizes = [%4, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%4}
flow.return
} count(%arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg1
flow.return %x, %y, %z : index, index, index
}
%3 = hal.tensor.export %2 "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
}
// -----// IR Dump After OutlineDispatchRegionsPass (iree-flow-outline-dispatch-regions) //----- //
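// note: the inline flow.dispatch.workgroups region from the previous dump is now outlined into flow.executable private @pack_dispatch_0 with a public export, and the call site is rewritten to flow.dispatch @pack_dispatch_0::@pack_dispatch_0.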
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
flow.executable private @pack_dispatch_0 {
flow.executable.export public @pack_dispatch_0 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0(%arg0: !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>, %arg1: index, %arg2: !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>) {
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index
%1 = flow.dispatch.tie_shape %arg0 : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0}
%2 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
%3 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0} -> tensor<?x8640x3200xf16>
%4 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %3 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = flow.dispatch @pack_dispatch_0::@pack_dispatch_0[%0](%1, %0) : (tensor<?x8640x3200xf16>{%0}, index) -> tensor<?x540x3200x16x1xf16>{%0}
%3 = hal.tensor.export %2 "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
}
// -----// IR Dump After AnnotateDispatchesPass (iree-flow-annotate-dispatches) //----- //
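// note: the export and its function are renamed from @pack_dispatch_0 to @pack_dispatch_0_pack_f16; the suffix summarizes the dispatch contents (a tensor.pack on f16 data), presumably to make traces and benchmarks easier to attribute.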
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
flow.executable private @pack_dispatch_0 {
flow.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>, %arg1: index, %arg2: !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>) {
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index
%1 = flow.dispatch.tie_shape %arg0 : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0}
%2 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
%3 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0} -> tensor<?x8640x3200xf16>
%4 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %3 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = flow.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%1, %0) : (tensor<?x8640x3200xf16>{%0}, index) -> tensor<?x540x3200x16x1xf16>{%0}
%3 = hal.tensor.export %2 "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = flow.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%1, %0) : (tensor<?x8640x3200xf16>{%0}, index) -> tensor<?x540x3200x16x1xf16>{%0}
%3 = hal.tensor.export %2 "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After StripDebugOps (iree-util-strip-debug-ops) //----- //
flow.executable private @pack_dispatch_0 {
flow.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>, %arg1: index, %arg2: !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>) {
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index
%1 = flow.dispatch.tie_shape %arg0 : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0}
%2 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
%3 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0} -> tensor<?x8640x3200xf16>
%4 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %3 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
return
}
}
}
// -----// IR Dump After DeduplicateExecutablesPass (iree-flow-deduplicate-executables) //----- //
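// note: there is only one flow.executable in the module, so there is nothing to deduplicate and the IR is unchanged.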
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
flow.executable private @pack_dispatch_0 {
flow.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>, %arg1: index, %arg2: !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>) {
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index
%1 = flow.dispatch.tie_shape %arg0 : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0}
%2 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
%3 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0} -> tensor<?x8640x3200xf16>
%4 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %3 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = flow.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%1, %0) : (tensor<?x8640x3200xf16>{%0}, index) -> tensor<?x540x3200x16x1xf16>{%0}
%3 = hal.tensor.export %2 "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
}
// -----// IR Dump After InjectTensorTracingPass (iree-flow-inject-tensor-tracing) //----- //
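// note: tensor tracing appears to be disabled for this compile, so no trace ops are injected and the function is unchanged.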
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = flow.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%1, %0) : (tensor<?x8640x3200xf16>{%0}, index) -> tensor<?x540x3200x16x1xf16>{%0}
%3 = hal.tensor.export %2 "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After CleanupTensorShapesPass (iree-flow-cleanup-tensor-shapes) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = flow.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%1, %0) : (tensor<?x8640x3200xf16>{%0}, index) -> tensor<?x540x3200x16x1xf16>{%0}
%3 = hal.tensor.export %2 "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
flow.executable private @pack_dispatch_0 {
flow.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>, %arg1: index, %arg2: !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>) {
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index
%1 = flow.dispatch.tie_shape %arg0 : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0}
%2 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
%3 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0} -> tensor<?x8640x3200xf16>
%4 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %3 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
return
}
}
}
// -----// IR Dump After CSE (cse) //----- //
flow.executable private @pack_dispatch_0 {
flow.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>, %arg1: index, %arg2: !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>) {
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index
%1 = flow.dispatch.tie_shape %arg0 : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0}
%2 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
%3 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0} -> tensor<?x8640x3200xf16>
%4 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %3 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
return
}
}
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = flow.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%1, %0) : (tensor<?x8640x3200xf16>{%0}, index) -> tensor<?x540x3200x16x1xf16>{%0}
%3 = hal.tensor.export %2 "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = flow.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%1, %0) : (tensor<?x8640x3200xf16>{%0}, index) -> tensor<?x540x3200x16x1xf16>{%0}
%3 = hal.tensor.export %2 "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After SymbolDCE (symbol-dce) //----- //
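// note: symbol-dce drops unreferenced symbols; @pack_dispatch_0 is referenced by the flow.dispatch in @pack and @pack itself is a public entry point, so both survive.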
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
flow.executable private @pack_dispatch_0 {
flow.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>, %arg1: index, %arg2: !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>) {
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index
%1 = flow.dispatch.tie_shape %arg0 : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0}
%2 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
%3 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0} -> tensor<?x8640x3200xf16>
%4 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %3 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = flow.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%1, %0) : (tensor<?x8640x3200xf16>{%0}, index) -> tensor<?x540x3200x16x1xf16>{%0}
%3 = hal.tensor.export %2 "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
}
// -----// IR Dump After VerifyInputPass (iree-stream-verify-input) //----- //
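// note: from here the stream-level passes (iree-stream-*) begin; this verification step does not modify the IR, and the dump matches the previous one.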
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
flow.executable private @pack_dispatch_0 {
flow.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>, %arg1: index, %arg2: !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>) {
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index
%1 = flow.dispatch.tie_shape %arg0 : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0}
%2 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
%3 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0} -> tensor<?x8640x3200xf16>
%4 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %3 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = flow.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%1, %0) : (tensor<?x8640x3200xf16>{%0}, index) -> tensor<?x540x3200x16x1xf16>{%0}
%3 = hal.tensor.export %2 "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = flow.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%1, %0) : (tensor<?x8640x3200xf16>{%0}, index) -> tensor<?x540x3200x16x1xf16>{%0}
%3 = hal.tensor.export %2 "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = flow.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%1, %0) : (tensor<?x8640x3200xf16>{%0}, index) -> tensor<?x540x3200x16x1xf16>{%0}
%3 = hal.tensor.export %2 "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = flow.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%1, %0) : (tensor<?x8640x3200xf16>{%0}, index) -> tensor<?x540x3200x16x1xf16>{%0}
%3 = hal.tensor.export %2 "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
flow.executable private @pack_dispatch_0 {
flow.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>, %arg1: index, %arg2: !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>) {
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index
%1 = flow.dispatch.tie_shape %arg0 : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0}
%2 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
%3 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0} -> tensor<?x8640x3200xf16>
%4 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %3 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = flow.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%1, %0) : (tensor<?x8640x3200xf16>{%0}, index) -> tensor<?x540x3200x16x1xf16>{%0}
%3 = hal.tensor.export %2 "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
}
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- //
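// note: this pass and the fuse-globals / IPO passes that follow operate on util.global ops and cross-function calls; the module has no globals and a single public entry point, so these dumps repeat the same IR.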
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
flow.executable private @pack_dispatch_0 {
flow.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>, %arg1: index, %arg2: !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>) {
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index
%1 = flow.dispatch.tie_shape %arg0 : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0}
%2 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
%3 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0} -> tensor<?x8640x3200xf16>
%4 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %3 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = flow.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%1, %0) : (tensor<?x8640x3200xf16>{%0}, index) -> tensor<?x540x3200x16x1xf16>{%0}
%3 = hal.tensor.export %2 "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
}
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
flow.executable private @pack_dispatch_0 {
flow.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>, %arg1: index, %arg2: !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>) {
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index
%1 = flow.dispatch.tie_shape %arg0 : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0}
%2 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
%3 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0} -> tensor<?x8640x3200xf16>
%4 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %3 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = flow.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%1, %0) : (tensor<?x8640x3200xf16>{%0}, index) -> tensor<?x540x3200x16x1xf16>{%0}
%3 = hal.tensor.export %2 "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
}
// -----// IR Dump After IPO (iree-util-ipo) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
flow.executable private @pack_dispatch_0 {
flow.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>, %arg1: index, %arg2: !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>) {
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index
%1 = flow.dispatch.tie_shape %arg0 : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0}
%2 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
%3 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0} -> tensor<?x8640x3200xf16>
%4 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %3 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = flow.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%1, %0) : (tensor<?x8640x3200xf16>{%0}, index) -> tensor<?x540x3200x16x1xf16>{%0}
%3 = hal.tensor.export %2 "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
}
// -----// IR Dump After OutlineConstants (iree-util-outline-constants) //----- //
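// note: this pass hoists large inline constants into globals; there are no inline tensor constants in this module, so nothing is outlined.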
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
flow.executable private @pack_dispatch_0 {
flow.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>, %arg1: index, %arg2: !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>) {
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index
%1 = flow.dispatch.tie_shape %arg0 : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0}
%2 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
%3 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0} -> tensor<?x8640x3200xf16>
%4 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %3 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = flow.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%1, %0) : (tensor<?x8640x3200xf16>{%0}, index) -> tensor<?x540x3200x16x1xf16>{%0}
%3 = hal.tensor.export %2 "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = flow.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%1, %0) : (tensor<?x8640x3200xf16>{%0}, index) -> tensor<?x540x3200x16x1xf16>{%0}
%3 = hal.tensor.export %2 "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = flow.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%1, %0) : (tensor<?x8640x3200xf16>{%0}, index) -> tensor<?x540x3200x16x1xf16>{%0}
%3 = hal.tensor.export %2 "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = flow.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%1, %0) : (tensor<?x8640x3200xf16>{%0}, index) -> tensor<?x540x3200x16x1xf16>{%0}
%3 = hal.tensor.export %2 "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
flow.executable private @pack_dispatch_0 {
flow.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>, %arg1: index, %arg2: !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>) {
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index
%1 = flow.dispatch.tie_shape %arg0 : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0}
%2 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
%3 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0} -> tensor<?x8640x3200xf16>
%4 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %3 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = flow.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%1, %0) : (tensor<?x8640x3200xf16>{%0}, index) -> tensor<?x540x3200x16x1xf16>{%0}
%3 = hal.tensor.export %2 "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
}
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
flow.executable private @pack_dispatch_0 {
flow.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>, %arg1: index, %arg2: !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>) {
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index
%1 = flow.dispatch.tie_shape %arg0 : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0}
%2 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
%3 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0} -> tensor<?x8640x3200xf16>
%4 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %3 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = flow.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%1, %0) : (tensor<?x8640x3200xf16>{%0}, index) -> tensor<?x540x3200x16x1xf16>{%0}
%3 = hal.tensor.export %2 "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
}
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
flow.executable private @pack_dispatch_0 {
flow.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>, %arg1: index, %arg2: !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>) {
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index
%1 = flow.dispatch.tie_shape %arg0 : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0}
%2 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
%3 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0} -> tensor<?x8640x3200xf16>
%4 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %3 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = flow.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%1, %0) : (tensor<?x8640x3200xf16>{%0}, index) -> tensor<?x540x3200x16x1xf16>{%0}
%3 = hal.tensor.export %2 "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
}
// -----// IR Dump After IPO (iree-util-ipo) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
flow.executable private @pack_dispatch_0 {
flow.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>, %arg1: index, %arg2: !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>) {
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index
%1 = flow.dispatch.tie_shape %arg0 : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0}
%2 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
%3 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0} -> tensor<?x8640x3200xf16>
%4 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %3 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0}
%2 = flow.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%1, %0) : (tensor<?x8640x3200xf16>{%0}, index) -> tensor<?x540x3200x16x1xf16>{%0}
%3 = hal.tensor.export %2 "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
}
// -----// IR Dump After ConvertToStreamPass (iree-stream-conversion) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
stream.executable private @pack_dispatch_0 {
stream.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0}
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
%3 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0} -> tensor<?x8640x3200xf16>
%4 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %3 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
%c8640 = arith.constant 8640 : index
%c3200 = arith.constant 3200 : index
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = stream.tensor.sizeof tensor<?x8640x3200xf16>{%0} : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%3 = stream.async.transfer %2 : !stream.resource<external>{%1} -> !stream.resource<*>{%1}
%c0 = arith.constant 0 : index
%4 = stream.tensor.sizeof tensor<?x540x3200x16x1xf16>{%0} : index
%5 = stream.async.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%3[%c0 to %1 for %1], %0) : (!stream.resource<*>{%1}, index) -> !stream.resource<*>{%4}
%6 = stream.async.transfer %5 : !stream.resource<*>{%4} -> !stream.resource<external>{%4}
%7 = stream.tensor.export %6 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%4} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
}
// -----// IR Dump After VerifyLoweringToTensorsPass (iree-stream-verify-lowering-to-tensors) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
stream.executable private @pack_dispatch_0 {
stream.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0}
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
%3 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0} -> tensor<?x8640x3200xf16>
%4 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %3 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
%c8640 = arith.constant 8640 : index
%c3200 = arith.constant 3200 : index
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = stream.tensor.sizeof tensor<?x8640x3200xf16>{%0} : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%3 = stream.async.transfer %2 : !stream.resource<external>{%1} -> !stream.resource<*>{%1}
%c0 = arith.constant 0 : index
%4 = stream.tensor.sizeof tensor<?x540x3200x16x1xf16>{%0} : index
%5 = stream.async.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%3[%c0 to %1 for %1], %0) : (!stream.resource<*>{%1}, index) -> !stream.resource<*>{%4}
%6 = stream.async.transfer %5 : !stream.resource<*>{%4} -> !stream.resource<external>{%4}
%7 = stream.tensor.export %6 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%4} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = stream.tensor.sizeof tensor<?x8640x3200xf16>{%0} : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%3 = stream.async.transfer %2 : !stream.resource<external>{%1} -> !stream.resource<*>{%1}
%4 = stream.tensor.sizeof tensor<?x540x3200x16x1xf16>{%0} : index
%5 = stream.async.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%3[%c0 to %1 for %1], %0) : (!stream.resource<*>{%1}, index) -> !stream.resource<*>{%4}
%6 = stream.async.transfer %5 : !stream.resource<*>{%4} -> !stream.resource<external>{%4}
%7 = stream.tensor.export %6 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%4} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = stream.tensor.sizeof tensor<?x8640x3200xf16>{%0} : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%3 = stream.async.transfer %2 : !stream.resource<external>{%1} -> !stream.resource<*>{%1}
%4 = stream.tensor.sizeof tensor<?x540x3200x16x1xf16>{%0} : index
%5 = stream.async.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%3[%c0 to %1 for %1], %0) : (!stream.resource<*>{%1}, index) -> !stream.resource<*>{%4}
%6 = stream.async.transfer %5 : !stream.resource<*>{%4} -> !stream.resource<external>{%4}
%7 = stream.tensor.export %6 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%4} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = stream.tensor.sizeof tensor<?x8640x3200xf16>{%0} : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%3 = stream.async.transfer %2 : !stream.resource<external>{%1} -> !stream.resource<*>{%1}
%4 = stream.tensor.sizeof tensor<?x540x3200x16x1xf16>{%0} : index
%5 = stream.async.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%3[%c0 to %1 for %1], %0) : (!stream.resource<*>{%1}, index) -> !stream.resource<*>{%4}
%6 = stream.async.transfer %5 : !stream.resource<*>{%4} -> !stream.resource<external>{%4}
%7 = stream.tensor.export %6 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%4} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
stream.executable private @pack_dispatch_0 {
stream.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0}
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
%3 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0} -> tensor<?x8640x3200xf16>
%4 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %3 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = stream.tensor.sizeof tensor<?x8640x3200xf16>{%0} : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%3 = stream.async.transfer %2 : !stream.resource<external>{%1} -> !stream.resource<*>{%1}
%4 = stream.tensor.sizeof tensor<?x540x3200x16x1xf16>{%0} : index
%5 = stream.async.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%3[%c0 to %1 for %1], %0) : (!stream.resource<*>{%1}, index) -> !stream.resource<*>{%4}
%6 = stream.async.transfer %5 : !stream.resource<*>{%4} -> !stream.resource<external>{%4}
%7 = stream.tensor.export %6 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%4} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
}
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
stream.executable private @pack_dispatch_0 {
stream.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0}
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
%3 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0} -> tensor<?x8640x3200xf16>
%4 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %3 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = stream.tensor.sizeof tensor<?x8640x3200xf16>{%0} : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%3 = stream.async.transfer %2 : !stream.resource<external>{%1} -> !stream.resource<*>{%1}
%4 = stream.tensor.sizeof tensor<?x540x3200x16x1xf16>{%0} : index
%5 = stream.async.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%3[%c0 to %1 for %1], %0) : (!stream.resource<*>{%1}, index) -> !stream.resource<*>{%4}
%6 = stream.async.transfer %5 : !stream.resource<*>{%4} -> !stream.resource<external>{%4}
%7 = stream.tensor.export %6 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%4} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
}
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
stream.executable private @pack_dispatch_0 {
stream.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0}
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
%3 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0} -> tensor<?x8640x3200xf16>
%4 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %3 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = stream.tensor.sizeof tensor<?x8640x3200xf16>{%0} : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%3 = stream.async.transfer %2 : !stream.resource<external>{%1} -> !stream.resource<*>{%1}
%4 = stream.tensor.sizeof tensor<?x540x3200x16x1xf16>{%0} : index
%5 = stream.async.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%3[%c0 to %1 for %1], %0) : (!stream.resource<*>{%1}, index) -> !stream.resource<*>{%4}
%6 = stream.async.transfer %5 : !stream.resource<*>{%4} -> !stream.resource<external>{%4}
%7 = stream.tensor.export %6 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%4} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
}
// -----// IR Dump After IPO (iree-util-ipo) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
stream.executable private @pack_dispatch_0 {
stream.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0}
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
%3 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0} -> tensor<?x8640x3200xf16>
%4 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %3 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = stream.tensor.sizeof tensor<?x8640x3200xf16>{%0} : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%3 = stream.async.transfer %2 : !stream.resource<external>{%1} -> !stream.resource<*>{%1}
%4 = stream.tensor.sizeof tensor<?x540x3200x16x1xf16>{%0} : index
%5 = stream.async.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%3[%c0 to %1 for %1], %0) : (!stream.resource<*>{%1}, index) -> !stream.resource<*>{%4}
%6 = stream.async.transfer %5 : !stream.resource<*>{%4} -> !stream.resource<external>{%4}
%7 = stream.tensor.export %6 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%4} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
}
// -----// IR Dump After CombineInitializers (iree-util-combine-initializers) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
stream.executable private @pack_dispatch_0 {
stream.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0}
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
%3 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0} -> tensor<?x8640x3200xf16>
%4 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %3 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = stream.tensor.sizeof tensor<?x8640x3200xf16>{%0} : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%3 = stream.async.transfer %2 : !stream.resource<external>{%1} -> !stream.resource<*>{%1}
%4 = stream.tensor.sizeof tensor<?x540x3200x16x1xf16>{%0} : index
%5 = stream.async.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%3[%c0 to %1 for %1], %0) : (!stream.resource<*>{%1}, index) -> !stream.resource<*>{%4}
%6 = stream.async.transfer %5 : !stream.resource<*>{%4} -> !stream.resource<external>{%4}
%7 = stream.tensor.export %6 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%4} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
}
// -----// IR Dump After EncodeDeviceTensorsPass (iree-stream-encode-device-tensors) //----- //
stream.executable private @pack_dispatch_0 {
stream.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0}
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
%3 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0} -> tensor<?x8640x3200xf16>
%4 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %3 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
return
}
}
}
// -----// IR Dump After EncodeHostTensorsPass (iree-stream-encode-host-tensors) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%3 = stream.async.transfer %2 : !stream.resource<external>{%1} -> !stream.resource<*>{%1}
%4 = arith.muli %0, %c55296000 : index
%5 = stream.async.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%3[%c0 to %1 for %1], %0) : (!stream.resource<*>{%1}, index) -> !stream.resource<*>{%4}
%6 = stream.async.transfer %5 : !stream.resource<*>{%4} -> !stream.resource<external>{%4}
%7 = stream.tensor.export %6 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%4} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
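The %c55296000 constant that EncodeHostTensorsPass materializes above comes from folding stream.tensor.sizeof: each unit of the dynamic leading dimension of tensor<?x8640x3200xf16> occupies 8640 * 3200 elements of f16 (2 bytes each), i.e. 55,296,000 bytes, so the resource size becomes %0 * 55296000. A small confirmation of that number (editor's sketch, not part of the dump):

# Byte size per unit of the dynamic dim, as folded by EncodeHostTensorsPass.
elems_per_batch = 8640 * 3200          # trailing static dims of the input tensor
bytes_per_elem = 2                     # f16
assert elems_per_batch * bytes_per_elem == 55_296_000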
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%3 = stream.async.transfer %2 : !stream.resource<external>{%1} -> !stream.resource<*>{%1}
%4 = arith.muli %0, %c55296000 : index
%5 = stream.async.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%3[%c0 to %1 for %1], %0) : (!stream.resource<*>{%1}, index) -> !stream.resource<*>{%4}
%6 = stream.async.transfer %5 : !stream.resource<*>{%4} -> !stream.resource<external>{%4}
%7 = stream.tensor.export %6 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%4} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%3 = stream.async.transfer %2 : !stream.resource<external>{%1} -> !stream.resource<*>{%1}
%4 = stream.async.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%3[%c0 to %1 for %1], %0) : (!stream.resource<*>{%1}, index) -> !stream.resource<*>{%1}
%5 = stream.async.transfer %4 : !stream.resource<*>{%1} -> !stream.resource<external>{%1}
%6 = stream.tensor.export %5 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
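The CSE dump above folds the second arith.muli %0, %c55296000 into the first, so the input and output resources end up sharing one size value (%1). That is sound because the packed output tensor<?x540x3200x16x1xf16> holds exactly as many bytes per batch element as the unpacked input: 540 * 3200 * 16 * 1 * 2 = 8640 * 3200 * 2. A quick check (illustrative only, not compiler output):

# Packed and unpacked layouts cover the same number of bytes per batch element,
# which is why CSE can reuse one size computation for both stream resources.
unpacked = 8640 * 3200 * 2             # f16 bytes, tensor<?x8640x3200xf16>
packed = 540 * 3200 * 16 * 1 * 2       # f16 bytes, tensor<?x540x3200x16x1xf16>
assert unpacked == packed == 55_296_000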
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%3 = stream.async.transfer %2 : !stream.resource<external>{%1} -> !stream.resource<*>{%1}
%4 = stream.async.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%3[%c0 to %1 for %1], %0) : (!stream.resource<*>{%1}, index) -> !stream.resource<*>{%1}
%5 = stream.async.transfer %4 : !stream.resource<*>{%1} -> !stream.resource<external>{%1}
%6 = stream.tensor.export %5 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
stream.executable private @pack_dispatch_0 {
stream.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0}
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
%3 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0} -> tensor<?x8640x3200xf16>
%4 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %3 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%3 = stream.async.transfer %2 : !stream.resource<external>{%1} -> !stream.resource<*>{%1}
%4 = stream.async.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%3[%c0 to %1 for %1], %0) : (!stream.resource<*>{%1}, index) -> !stream.resource<*>{%1}
%5 = stream.async.transfer %4 : !stream.resource<*>{%1} -> !stream.resource<external>{%1}
%6 = stream.tensor.export %5 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
}
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
stream.executable private @pack_dispatch_0 {
stream.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0}
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
%3 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0} -> tensor<?x8640x3200xf16>
%4 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %3 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%3 = stream.async.transfer %2 : !stream.resource<external>{%1} -> !stream.resource<*>{%1}
%4 = stream.async.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%3[%c0 to %1 for %1], %0) : (!stream.resource<*>{%1}, index) -> !stream.resource<*>{%1}
%5 = stream.async.transfer %4 : !stream.resource<*>{%1} -> !stream.resource<external>{%1}
%6 = stream.tensor.export %5 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
}
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
stream.executable private @pack_dispatch_0 {
stream.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0}
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
%3 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0} -> tensor<?x8640x3200xf16>
%4 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %3 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%3 = stream.async.transfer %2 : !stream.resource<external>{%1} -> !stream.resource<*>{%1}
%4 = stream.async.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%3[%c0 to %1 for %1], %0) : (!stream.resource<*>{%1}, index) -> !stream.resource<*>{%1}
%5 = stream.async.transfer %4 : !stream.resource<*>{%1} -> !stream.resource<external>{%1}
%6 = stream.tensor.export %5 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
}
// -----// IR Dump After IPO (iree-util-ipo) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
stream.executable private @pack_dispatch_0 {
stream.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0}
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
%3 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0} -> tensor<?x8640x3200xf16>
%4 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %3 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%3 = stream.async.transfer %2 : !stream.resource<external>{%1} -> !stream.resource<*>{%1}
%4 = stream.async.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%3[%c0 to %1 for %1], %0) : (!stream.resource<*>{%1}, index) -> !stream.resource<*>{%1}
%5 = stream.async.transfer %4 : !stream.resource<*>{%1} -> !stream.resource<external>{%1}
%6 = stream.tensor.export %5 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
}
// -----// IR Dump After VerifyLoweringToAsyncResourcesPass (iree-stream-verify-lowering-to-async-resources) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
stream.executable private @pack_dispatch_0 {
stream.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0}
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
%3 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0} -> tensor<?x8640x3200xf16>
%4 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %3 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%3 = stream.async.transfer %2 : !stream.resource<external>{%1} -> !stream.resource<*>{%1}
%4 = stream.async.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%3[%c0 to %1 for %1], %0) : (!stream.resource<*>{%1}, index) -> !stream.resource<*>{%1}
%5 = stream.async.transfer %4 : !stream.resource<*>{%1} -> !stream.resource<external>{%1}
%6 = stream.tensor.export %5 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
}
// -----// IR Dump After MaterializeCopyOnWritePass (iree-stream-materialize-copy-on-write) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%3 = stream.async.transfer %2 : !stream.resource<external>{%1} -> !stream.resource<*>{%1}
%4 = stream.async.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%3[%c0 to %1 for %1], %0) : (!stream.resource<*>{%1}, index) -> !stream.resource<*>{%1}
%5 = stream.async.transfer %4 : !stream.resource<*>{%1} -> !stream.resource<external>{%1}
%6 = stream.tensor.export %5 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%3 = stream.async.transfer %2 : !stream.resource<external>{%1} -> !stream.resource<*>{%1}
%4 = stream.async.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%3[%c0 to %1 for %1], %0) : (!stream.resource<*>{%1}, index) -> !stream.resource<*>{%1}
%5 = stream.async.transfer %4 : !stream.resource<*>{%1} -> !stream.resource<external>{%1}
%6 = stream.tensor.export %5 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
// -----// IR Dump After ElideAsyncCopiesPass (iree-stream-elide-async-copies) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
stream.executable private @pack_dispatch_0 {
stream.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0}
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
%3 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0} -> tensor<?x8640x3200xf16>
%4 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %3 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%3 = stream.async.transfer %2 : !stream.resource<external>{%1} -> !stream.resource<*>{%1}
%4 = stream.async.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%3[%c0 to %1 for %1], %0) : (!stream.resource<*>{%1}, index) -> !stream.resource<*>{%1}
%5 = stream.async.transfer %4 : !stream.resource<*>{%1} -> !stream.resource<external>{%1}
%6 = stream.tensor.export %5 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%3 = stream.async.transfer %2 : !stream.resource<external>{%1} -> !stream.resource<*>{%1}
%4 = stream.async.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%3[%c0 to %1 for %1], %0) : (!stream.resource<*>{%1}, index) -> !stream.resource<*>{%1}
%5 = stream.async.transfer %4 : !stream.resource<*>{%1} -> !stream.resource<external>{%1}
%6 = stream.tensor.export %5 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
// -----// IR Dump After EmplaceAllocationsPass (iree-stream-emplace-allocations) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%3 = stream.async.transfer %2 : !stream.resource<external>{%1} -> !stream.resource<*>{%1}
%4 = stream.async.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%3[%c0 to %1 for %1], %0) : (!stream.resource<*>{%1}, index) -> !stream.resource<*>{%1}
%5 = stream.async.transfer %4 : !stream.resource<*>{%1} -> !stream.resource<external>{%1}
%6 = stream.tensor.export %5 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
// -----// IR Dump After RefineUsagePass (iree-stream-refine-usage) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
stream.executable private @pack_dispatch_0 {
stream.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0}
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
%3 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0} -> tensor<?x8640x3200xf16>
%4 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %3 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%3 = stream.async.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%2[%c0 to %1 for %1], %0) : (!stream.resource<external>{%1}, index) -> !stream.resource<external>{%1}
%4 = stream.tensor.export %3 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%3 = stream.async.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%2[%c0 to %1 for %1], %0) : (!stream.resource<external>{%1}, index) -> !stream.resource<external>{%1}
%4 = stream.tensor.export %3 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%3 = stream.async.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%2[%c0 to %1 for %1], %0) : (!stream.resource<external>{%1}, index) -> !stream.resource<external>{%1}
%4 = stream.tensor.export %3 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%3 = stream.async.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%2[%c0 to %1 for %1], %0) : (!stream.resource<external>{%1}, index) -> !stream.resource<external>{%1}
%4 = stream.tensor.export %3 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
stream.executable private @pack_dispatch_0 {
stream.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0}
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
%3 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0} -> tensor<?x8640x3200xf16>
%4 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %3 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%3 = stream.async.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%2[%c0 to %1 for %1], %0) : (!stream.resource<external>{%1}, index) -> !stream.resource<external>{%1}
%4 = stream.tensor.export %3 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
stream.executable private @pack_dispatch_0 {
stream.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0}
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
%3 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0} -> tensor<?x8640x3200xf16>
%4 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %3 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%3 = stream.async.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%2[%c0 to %1 for %1], %0) : (!stream.resource<external>{%1}, index) -> !stream.resource<external>{%1}
%4 = stream.tensor.export %3 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
stream.executable private @pack_dispatch_0 {
stream.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0}
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
%3 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0} -> tensor<?x8640x3200xf16>
%4 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %3 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%3 = stream.async.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%2[%c0 to %1 for %1], %0) : (!stream.resource<external>{%1}, index) -> !stream.resource<external>{%1}
%4 = stream.tensor.export %3 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After IPO (iree-util-ipo) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
stream.executable private @pack_dispatch_0 {
stream.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0}
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
%3 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0} -> tensor<?x8640x3200xf16>
%4 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %3 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%3 = stream.async.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%2[%c0 to %1 for %1], %0) : (!stream.resource<external>{%1}, index) -> !stream.resource<external>{%1}
%4 = stream.tensor.export %3 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After VerifyAsyncAccessRangesPass (iree-stream-verify-async-access-ranges) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
stream.executable private @pack_dispatch_0 {
stream.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0}
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
%3 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0} -> tensor<?x8640x3200xf16>
%4 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %3 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%3 = stream.async.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%2[%c0 to %1 for %1], %0) : (!stream.resource<external>{%1}, index) -> !stream.resource<external>{%1}
%4 = stream.tensor.export %3 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After ScheduleExecutionPass (iree-stream-schedule-execution) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%results, %result_timepoint = stream.async.execute with(%2 as %arg1: !stream.resource<external>{%1}) -> !stream.resource<external>{%1} {
%5 = stream.async.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%arg1[%c0 to %1 for %1], %0) : (!stream.resource<external>{%1}, index) -> !stream.resource<external>{%1}
stream.yield %5 : !stream.resource<external>{%1}
} => !stream.timepoint
%3 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%1}
%4 = stream.tensor.export %3 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After ScheduleConcurrencyPass (iree-stream-schedule-concurrency) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%results, %result_timepoint = stream.async.execute with(%2 as %arg1: !stream.resource<external>{%1}) -> !stream.resource<external>{%1} {
%5 = stream.async.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%arg1[%c0 to %1 for %1], %0) : (!stream.resource<external>{%1}, index) -> !stream.resource<external>{%1}
stream.yield %5 : !stream.resource<external>{%1}
} => !stream.timepoint
%3 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%1}
%4 = stream.tensor.export %3 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After PropagateTimepointsPass (iree-stream-propagate-timepoints) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
stream.executable private @pack_dispatch_0 {
stream.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0}
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
%3 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0} -> tensor<?x8640x3200xf16>
%4 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %3 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%3 = stream.timepoint.immediate => !stream.timepoint
%results, %result_timepoint = stream.async.execute await(%3) => with(%2 as %arg1: !stream.resource<external>{%1}) -> !stream.resource<external>{%1} {
%6 = stream.async.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%arg1[%c0 to %1 for %1], %0) : (!stream.resource<external>{%1}, index) -> !stream.resource<external>{%1}
stream.yield %6 : !stream.resource<external>{%1}
} => !stream.timepoint
%4 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%1}
%5 = stream.tensor.export %4 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
}
// -----// IR Dump After MaterializeBuiltinsPass (iree-stream-materialize-builtins) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
stream.executable private @pack_dispatch_0 {
stream.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0}
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
%3 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0} -> tensor<?x8640x3200xf16>
%4 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %3 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%3 = stream.timepoint.immediate => !stream.timepoint
%results, %result_timepoint = stream.async.execute await(%3) => with(%2 as %arg1: !stream.resource<external>{%1}) -> !stream.resource<external>{%1} {
%6 = stream.async.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%arg1[%c0 to %1 for %1], %0) : (!stream.resource<external>{%1}, index) -> !stream.resource<external>{%1}
stream.yield %6 : !stream.resource<external>{%1}
} => !stream.timepoint
%4 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%1}
%5 = stream.tensor.export %4 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%results, %result_timepoint = stream.async.execute with(%2 as %arg1: !stream.resource<external>{%1}) -> !stream.resource<external>{%1} {
%5 = stream.async.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%arg1[%c0 to %1 for %1], %0) : (!stream.resource<external>{%1}, index) -> !stream.resource<external>{%1}
stream.yield %5 : !stream.resource<external>{%1}
} => !stream.timepoint
%3 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%1}
%4 = stream.tensor.export %3 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%results, %result_timepoint = stream.async.execute with(%2 as %arg1: !stream.resource<external>{%1}) -> !stream.resource<external>{%1} {
%5 = stream.async.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%arg1[%c0 to %1 for %1], %0) : (!stream.resource<external>{%1}, index) -> !stream.resource<external>{%1}
stream.yield %5 : !stream.resource<external>{%1}
} => !stream.timepoint
%3 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%1}
%4 = stream.tensor.export %3 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%results, %result_timepoint = stream.async.execute with(%2 as %arg1: !stream.resource<external>{%1}) -> !stream.resource<external>{%1} {
%5 = stream.async.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%arg1[%c0 to %1 for %1], %0) : (!stream.resource<external>{%1}, index) -> !stream.resource<external>{%1}
stream.yield %5 : !stream.resource<external>{%1}
} => !stream.timepoint
%3 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%1}
%4 = stream.tensor.export %3 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
stream.executable private @pack_dispatch_0 {
stream.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0}
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
%3 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0} -> tensor<?x8640x3200xf16>
%4 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %3 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%results, %result_timepoint = stream.async.execute with(%2 as %arg1: !stream.resource<external>{%1}) -> !stream.resource<external>{%1} {
%5 = stream.async.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%arg1[%c0 to %1 for %1], %0) : (!stream.resource<external>{%1}, index) -> !stream.resource<external>{%1}
stream.yield %5 : !stream.resource<external>{%1}
} => !stream.timepoint
%3 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%1}
%4 = stream.tensor.export %3 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
stream.executable private @pack_dispatch_0 {
stream.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0}
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
%3 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0} -> tensor<?x8640x3200xf16>
%4 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %3 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%results, %result_timepoint = stream.async.execute with(%2 as %arg1: !stream.resource<external>{%1}) -> !stream.resource<external>{%1} {
%5 = stream.async.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%arg1[%c0 to %1 for %1], %0) : (!stream.resource<external>{%1}, index) -> !stream.resource<external>{%1}
stream.yield %5 : !stream.resource<external>{%1}
} => !stream.timepoint
%3 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%1}
%4 = stream.tensor.export %3 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
stream.executable private @pack_dispatch_0 {
stream.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0}
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
%3 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0} -> tensor<?x8640x3200xf16>
%4 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %3 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%results, %result_timepoint = stream.async.execute with(%2 as %arg1: !stream.resource<external>{%1}) -> !stream.resource<external>{%1} {
%5 = stream.async.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%arg1[%c0 to %1 for %1], %0) : (!stream.resource<external>{%1}, index) -> !stream.resource<external>{%1}
stream.yield %5 : !stream.resource<external>{%1}
} => !stream.timepoint
%3 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%1}
%4 = stream.tensor.export %3 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After IPO (iree-util-ipo) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
stream.executable private @pack_dispatch_0 {
stream.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0}
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
%3 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0} -> tensor<?x8640x3200xf16>
%4 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %3 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%results, %result_timepoint = stream.async.execute with(%2 as %arg1: !stream.resource<external>{%1}) -> !stream.resource<external>{%1} {
%5 = stream.async.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%arg1[%c0 to %1 for %1], %0) : (!stream.resource<external>{%1}, index) -> !stream.resource<external>{%1}
stream.yield %5 : !stream.resource<external>{%1}
} => !stream.timepoint
%3 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%1}
%4 = stream.tensor.export %3 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After VerifyLoweringToAsyncPass (iree-stream-verify-lowering-to-async) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
stream.executable private @pack_dispatch_0 {
stream.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0}
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
%3 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0} -> tensor<?x8640x3200xf16>
%4 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %3 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%results, %result_timepoint = stream.async.execute with(%2 as %arg1: !stream.resource<external>{%1}) -> !stream.resource<external>{%1} {
%5 = stream.async.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%arg1[%c0 to %1 for %1], %0) : (!stream.resource<external>{%1}, index) -> !stream.resource<external>{%1}
stream.yield %5 : !stream.resource<external>{%1}
} => !stream.timepoint
%3 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%1}
%4 = stream.tensor.export %3 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After ScheduleAllocationPass (iree-stream-schedule-allocation) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
stream.executable private @pack_dispatch_0 {
stream.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0}
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
%3 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0} -> tensor<?x8640x3200xf16>
%4 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %3 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%c0_0 = arith.constant 0 : index
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%1} => !stream.timepoint
%3 = stream.cmd.execute await(%result_timepoint) => with(%2 as %arg1: !stream.resource<external>{%1}, %result as %arg2: !stream.resource<external>{%1}) {
stream.cmd.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%0 : index) {
ro %arg1[%c0 for %1] : !stream.resource<external>{%1},
wo %arg2[%c0_0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%1}
%5 = stream.tensor.export %4 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
}
// -----// IR Dump After PackConstantsPass (iree-stream-pack-constants) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%c0_0 = arith.constant 0 : index
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%1} => !stream.timepoint
%3 = stream.cmd.execute await(%result_timepoint) => with(%2 as %arg1: !stream.resource<external>{%1}, %result as %arg2: !stream.resource<external>{%1}) {
stream.cmd.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%0 : index) {
ro %arg1[%c0 for %1] : !stream.resource<external>{%1},
wo %arg2[%c0_0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%1}
%5 = stream.tensor.export %4 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
// -----// IR Dump After LayoutSlicesPass (iree-stream-layout-slices) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%c0_0 = arith.constant 0 : index
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%1} => !stream.timepoint
%3 = stream.cmd.execute await(%result_timepoint) => with(%2 as %arg1: !stream.resource<external>{%1}, %result as %arg2: !stream.resource<external>{%1}) {
stream.cmd.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%0 : index) {
ro %arg1[%c0 for %1] : !stream.resource<external>{%1},
wo %arg2[%c0_0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%1}
%5 = stream.tensor.export %4 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
// -----// IR Dump After PropagateSubranges (iree-util-propagate-subranges) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
stream.executable private @pack_dispatch_0 {
stream.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0}
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
%3 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0} -> tensor<?x8640x3200xf16>
%4 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %3 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%c0_0 = arith.constant 0 : index
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%1} => !stream.timepoint
%3 = stream.cmd.execute await(%result_timepoint) => with(%2 as %arg1: !stream.resource<external>{%1}, %result as %arg2: !stream.resource<external>{%1}) {
stream.cmd.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%0 : index) {
ro %arg1[%c0 for %1] : !stream.resource<external>{%1},
wo %arg2[%c0_0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%1}
%5 = stream.tensor.export %4 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%1} => !stream.timepoint
%3 = stream.cmd.execute await(%result_timepoint) => with(%2 as %arg1: !stream.resource<external>{%1}, %result as %arg2: !stream.resource<external>{%1}) {
stream.cmd.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%0 : index) {
ro %arg1[%c0 for %1] : !stream.resource<external>{%1},
wo %arg2[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%1}
%5 = stream.tensor.export %4 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%1} => !stream.timepoint
%3 = stream.cmd.execute await(%result_timepoint) => with(%2 as %arg1: !stream.resource<external>{%1}, %result as %arg2: !stream.resource<external>{%1}) {
stream.cmd.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%0 : index) {
ro %arg1[%c0 for %1] : !stream.resource<external>{%1},
wo %arg2[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%1}
%5 = stream.tensor.export %4 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%1} => !stream.timepoint
%3 = stream.cmd.execute await(%result_timepoint) => with(%2 as %arg1: !stream.resource<external>{%1}, %result as %arg2: !stream.resource<external>{%1}) {
stream.cmd.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%0 : index) {
ro %arg1[%c0 for %1] : !stream.resource<external>{%1},
wo %arg2[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%1}
%5 = stream.tensor.export %4 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
stream.executable private @pack_dispatch_0 {
stream.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0}
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
%3 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0} -> tensor<?x8640x3200xf16>
%4 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %3 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%1} => !stream.timepoint
%3 = stream.cmd.execute await(%result_timepoint) => with(%2 as %arg1: !stream.resource<external>{%1}, %result as %arg2: !stream.resource<external>{%1}) {
stream.cmd.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%0 : index) {
ro %arg1[%c0 for %1] : !stream.resource<external>{%1},
wo %arg2[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%1}
%5 = stream.tensor.export %4 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
}
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
stream.executable private @pack_dispatch_0 {
stream.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0}
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
%3 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0} -> tensor<?x8640x3200xf16>
%4 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %3 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%1} => !stream.timepoint
%3 = stream.cmd.execute await(%result_timepoint) => with(%2 as %arg1: !stream.resource<external>{%1}, %result as %arg2: !stream.resource<external>{%1}) {
stream.cmd.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%0 : index) {
ro %arg1[%c0 for %1] : !stream.resource<external>{%1},
wo %arg2[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%1}
%5 = stream.tensor.export %4 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
}
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
stream.executable private @pack_dispatch_0 {
stream.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0}
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
%3 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0} -> tensor<?x8640x3200xf16>
%4 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %3 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%1} => !stream.timepoint
%3 = stream.cmd.execute await(%result_timepoint) => with(%2 as %arg1: !stream.resource<external>{%1}, %result as %arg2: !stream.resource<external>{%1}) {
stream.cmd.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%0 : index) {
ro %arg1[%c0 for %1] : !stream.resource<external>{%1},
wo %arg2[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%1}
%5 = stream.tensor.export %4 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
}
// -----// IR Dump After IPO (iree-util-ipo) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
stream.executable private @pack_dispatch_0 {
stream.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0}
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
%3 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0} -> tensor<?x8640x3200xf16>
%4 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %3 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%1} => !stream.timepoint
%3 = stream.cmd.execute await(%result_timepoint) => with(%2 as %arg1: !stream.resource<external>{%1}, %result as %arg2: !stream.resource<external>{%1}) {
stream.cmd.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%0 : index) {
ro %arg1[%c0 for %1] : !stream.resource<external>{%1},
wo %arg2[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%1}
%5 = stream.tensor.export %4 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
}
// -----// IR Dump After VerifyLoweringToCmdPass (iree-stream-verify-lowering-to-cmd) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
stream.executable private @pack_dispatch_0 {
stream.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0}
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
%3 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0} -> tensor<?x8640x3200xf16>
%4 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %3 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%1} => !stream.timepoint
%3 = stream.cmd.execute await(%result_timepoint) => with(%2 as %arg1: !stream.resource<external>{%1}, %result as %arg2: !stream.resource<external>{%1}) {
stream.cmd.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%0 : index) {
ro %arg1[%c0 for %1] : !stream.resource<external>{%1},
wo %arg2[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%1}
%5 = stream.tensor.export %4 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%1} => !stream.timepoint
%3 = stream.cmd.execute await(%result_timepoint) => with(%2 as %arg1: !stream.resource<external>{%1}, %result as %arg2: !stream.resource<external>{%1}) {
stream.cmd.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%0 : index) {
ro %arg1[%c0 for %1] : !stream.resource<external>{%1},
wo %arg2[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%1}
%5 = stream.tensor.export %4 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%1} => !stream.timepoint
%3 = stream.cmd.execute await(%result_timepoint) => with(%2 as %arg1: !stream.resource<external>{%1}, %result as %arg2: !stream.resource<external>{%1}) {
stream.cmd.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%0 : index) {
ro %arg1[%c0 for %1] : !stream.resource<external>{%1},
wo %arg2[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%1}
%5 = stream.tensor.export %4 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%1} => !stream.timepoint
%3 = stream.cmd.execute await(%result_timepoint) => with(%2 as %arg1: !stream.resource<external>{%1}, %result as %arg2: !stream.resource<external>{%1}) {
stream.cmd.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%0 : index) {
ro %arg1[%c0 for %1] : !stream.resource<external>{%1},
wo %arg2[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%1}
%5 = stream.tensor.export %4 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
stream.executable private @pack_dispatch_0 {
stream.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0}
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
%3 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0} -> tensor<?x8640x3200xf16>
%4 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %3 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%1} => !stream.timepoint
%3 = stream.cmd.execute await(%result_timepoint) => with(%2 as %arg1: !stream.resource<external>{%1}, %result as %arg2: !stream.resource<external>{%1}) {
stream.cmd.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%0 : index) {
ro %arg1[%c0 for %1] : !stream.resource<external>{%1},
wo %arg2[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%1}
%5 = stream.tensor.export %4 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
}
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
stream.executable private @pack_dispatch_0 {
stream.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0}
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
%3 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0} -> tensor<?x8640x3200xf16>
%4 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %3 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%1} => !stream.timepoint
%3 = stream.cmd.execute await(%result_timepoint) => with(%2 as %arg1: !stream.resource<external>{%1}, %result as %arg2: !stream.resource<external>{%1}) {
stream.cmd.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%0 : index) {
ro %arg1[%c0 for %1] : !stream.resource<external>{%1},
wo %arg2[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%1}
%5 = stream.tensor.export %4 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
}
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
stream.executable private @pack_dispatch_0 {
stream.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0}
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
%3 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0} -> tensor<?x8640x3200xf16>
%4 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %3 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%1} => !stream.timepoint
%3 = stream.cmd.execute await(%result_timepoint) => with(%2 as %arg1: !stream.resource<external>{%1}, %result as %arg2: !stream.resource<external>{%1}) {
stream.cmd.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%0 : index) {
ro %arg1[%c0 for %1] : !stream.resource<external>{%1},
wo %arg2[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%1}
%5 = stream.tensor.export %4 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
}
// -----// IR Dump After IPO (iree-util-ipo) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
stream.executable private @pack_dispatch_0 {
stream.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0}
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
%3 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0} -> tensor<?x8640x3200xf16>
%4 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %3 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%1} => !stream.timepoint
%3 = stream.cmd.execute await(%result_timepoint) => with(%2 as %arg1: !stream.resource<external>{%1}, %result as %arg2: !stream.resource<external>{%1}) {
stream.cmd.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%0 : index) {
ro %arg1[%c0 for %1] : !stream.resource<external>{%1},
wo %arg2[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%1}
%5 = stream.tensor.export %4 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
}
// -----// IR Dump After SCFToControlFlow (convert-scf-to-cf) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%1} => !stream.timepoint
%3 = stream.cmd.execute await(%result_timepoint) => with(%2 as %arg1: !stream.resource<external>{%1}, %result as %arg2: !stream.resource<external>{%1}) {
stream.cmd.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%0 : index) {
ro %arg1[%c0 for %1] : !stream.resource<external>{%1},
wo %arg2[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%1}
%5 = stream.tensor.export %4 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%1} => !stream.timepoint
%3 = stream.cmd.execute await(%result_timepoint) => with(%2 as %arg1: !stream.resource<external>{%1}, %result as %arg2: !stream.resource<external>{%1}) {
stream.cmd.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%0 : index) {
ro %arg1[%c0 for %1] : !stream.resource<external>{%1},
wo %arg2[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%1}
%5 = stream.tensor.export %4 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%1} => !stream.timepoint
%3 = stream.cmd.execute await(%result_timepoint) => with(%2 as %arg1: !stream.resource<external>{%1}, %result as %arg2: !stream.resource<external>{%1}) {
stream.cmd.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%0 : index) {
ro %arg1[%c0 for %1] : !stream.resource<external>{%1},
wo %arg2[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%1}
%5 = stream.tensor.export %4 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%1} => !stream.timepoint
%3 = stream.cmd.execute await(%result_timepoint) => with(%2 as %arg1: !stream.resource<external>{%1}, %result as %arg2: !stream.resource<external>{%1}) {
stream.cmd.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%0 : index) {
ro %arg1[%c0 for %1] : !stream.resource<external>{%1},
wo %arg2[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%1}
%5 = stream.tensor.export %4 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu], iree.fixedpoint.iteration = 0 : index} {
stream.executable private @pack_dispatch_0 {
stream.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0}
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
%3 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0} -> tensor<?x8640x3200xf16>
%4 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %3 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%1} => !stream.timepoint
%3 = stream.cmd.execute await(%result_timepoint) => with(%2 as %arg1: !stream.resource<external>{%1}, %result as %arg2: !stream.resource<external>{%1}) {
stream.cmd.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%0 : index) {
ro %arg1[%c0 for %1] : !stream.resource<external>{%1},
wo %arg2[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%1}
%5 = stream.tensor.export %4 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
}
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu], iree.fixedpoint.iteration = 0 : index} {
stream.executable private @pack_dispatch_0 {
stream.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0}
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
%3 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0} -> tensor<?x8640x3200xf16>
%4 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %3 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%1} => !stream.timepoint
%3 = stream.cmd.execute await(%result_timepoint) => with(%2 as %arg1: !stream.resource<external>{%1}, %result as %arg2: !stream.resource<external>{%1}) {
stream.cmd.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%0 : index) {
ro %arg1[%c0 for %1] : !stream.resource<external>{%1},
wo %arg2[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%1}
%5 = stream.tensor.export %4 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
}
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu], iree.fixedpoint.iteration = 0 : index} {
stream.executable private @pack_dispatch_0 {
stream.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0}
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
%3 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0} -> tensor<?x8640x3200xf16>
%4 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %3 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%1} => !stream.timepoint
%3 = stream.cmd.execute await(%result_timepoint) => with(%2 as %arg1: !stream.resource<external>{%1}, %result as %arg2: !stream.resource<external>{%1}) {
stream.cmd.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%0 : index) {
ro %arg1[%c0 for %1] : !stream.resource<external>{%1},
wo %arg2[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%1}
%5 = stream.tensor.export %4 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
}
// -----// IR Dump After IPO (iree-util-ipo) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu], iree.fixedpoint.iteration = 0 : index} {
stream.executable private @pack_dispatch_0 {
stream.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0}
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
%3 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0} -> tensor<?x8640x3200xf16>
%4 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %3 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%1} => !stream.timepoint
%3 = stream.cmd.execute await(%result_timepoint) => with(%2 as %arg1: !stream.resource<external>{%1}, %result as %arg2: !stream.resource<external>{%1}) {
stream.cmd.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%0 : index) {
ro %arg1[%c0 for %1] : !stream.resource<external>{%1},
wo %arg2[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%1}
%5 = stream.tensor.export %4 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
}
// -----// IR Dump After ElideTimepointsPass (iree-stream-elide-timepoints) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu], iree.fixedpoint.iteration = 0 : index} {
stream.executable private @pack_dispatch_0 {
stream.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0}
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
%3 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0} -> tensor<?x8640x3200xf16>
%4 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %3 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%1} => !stream.timepoint
%3 = stream.cmd.execute await(%result_timepoint) => with(%2 as %arg1: !stream.resource<external>{%1}, %result as %arg2: !stream.resource<external>{%1}) {
stream.cmd.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%0 : index) {
ro %arg1[%c0 for %1] : !stream.resource<external>{%1},
wo %arg2[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%1}
%5 = stream.tensor.export %4 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
}
// -----// IR Dump After FixedPointIterator (iree-util-fixed-point-iterator) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
stream.executable private @pack_dispatch_0 {
stream.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0}
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
%3 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0} -> tensor<?x8640x3200xf16>
%4 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %3 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%1} => !stream.timepoint
%3 = stream.cmd.execute await(%result_timepoint) => with(%2 as %arg1: !stream.resource<external>{%1}, %result as %arg2: !stream.resource<external>{%1}) {
stream.cmd.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%0 : index) {
ro %arg1[%c0 for %1] : !stream.resource<external>{%1},
wo %arg2[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%1}
%5 = stream.tensor.export %4 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
}
// -----// IR Dump After FuseDispatchBindingsPass (iree-stream-fuse-dispatch-bindings) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
stream.executable private @pack_dispatch_0 {
stream.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: index) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg4, 0 : index
%1 = stream.binding.subspan %arg0[%arg2] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0}
%2 = stream.binding.subspan %arg1[%arg3] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
%3 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0} -> tensor<?x8640x3200xf16>
%4 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %3 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%1} => !stream.timepoint
%c0_0 = arith.constant 0 : index
%3 = stream.cmd.execute await(%result_timepoint) => with(%2 as %arg1: !stream.resource<external>{%1}, %result as %arg2: !stream.resource<external>{%1}) {
stream.cmd.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%c0, %c0, %0 : index, index, index) {
ro %arg1[%c0_0 for %1] : !stream.resource<external>{%1},
wo %arg2[%c0_0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%1}
%5 = stream.tensor.export %4 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
}
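// The effect of iree-stream-fuse-dispatch-bindings is visible in the dispatch
// signature above: the per-binding byte offsets that were previously hard-coded
// %c0 subspan offsets are now passed in as index operands (%arg2, %arg3), and
// the call site forwards %c0 for both alongside the workload size %0. A minimal
// sketch of the resulting subspan pattern, with illustrative names only:
func.func @subspan_with_offset_operand(%binding: !stream.binding, %offset: index, %dim: index) {
  // The binding offset is an SSA operand rather than a hard-coded constant.
  %view = stream.binding.subspan %binding[%offset] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%dim}
  return
}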
// -----// IR Dump After AnnotateDispatchArgumentsPass (iree-stream-annotate-dispatch-arguments) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
stream.executable private @pack_dispatch_0 {
stream.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: index {stream.values = [0 : index]}, %arg3: index {stream.values = [0 : index]}, %arg4: index) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg4, 0 : index
%1 = stream.binding.subspan %arg0[%arg2] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0}
%2 = stream.binding.subspan %arg1[%arg3] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
%3 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [%0, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%0} -> tensor<?x8640x3200xf16>
%4 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %3 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%1} => !stream.timepoint
%c0_0 = arith.constant 0 : index
%3 = stream.cmd.execute await(%result_timepoint) => with(%2 as %arg1: !stream.resource<external>{%1}, %result as %arg2: !stream.resource<external>{%1}) {
stream.cmd.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%c0, %c0, %0 : index, index, index) {
ro %arg1[%c0_0 for %1] : !stream.resource<external>{%1},
wo %arg2[%c0_0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%1}
%5 = stream.tensor.export %4 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
}
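// iree-stream-annotate-dispatch-arguments recorded what it could prove about the
// dispatch arguments: each !stream.binding carries stream.alignment = 64 and the
// two offset operands carry stream.values = [0 : index], since every call site
// passes zero. A minimal signature sketch of those argument attributes, with
// illustrative names only:
func.func @annotated_signature(%binding: !stream.binding {stream.alignment = 64 : index},
                               %offset: index {stream.values = [0 : index]}) {
  return
}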
// -----// IR Dump After PackDispatchOperandsPass (iree-stream-pack-dispatch-operands) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
stream.executable private @pack_dispatch_0 {
stream.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: i32, %arg3: i32, %arg4: i32, %arg5: i32, %arg6: i32, %arg7: i32) {
%0 = arith.extui %arg2 : i32 to i64
%1 = arith.extui %arg3 : i32 to i64
%c32_i64 = arith.constant 32 : i64
%2 = arith.shli %1, %c32_i64 : i64
%3 = arith.ori %0, %2 : i64
%4 = arith.index_castui %3 {stream.values = [0 : index]} : i64 to index
%5 = arith.extui %arg4 : i32 to i64
%6 = arith.extui %arg5 : i32 to i64
%c32_i64_0 = arith.constant 32 : i64
%7 = arith.shli %6, %c32_i64_0 : i64
%8 = arith.ori %5, %7 : i64
%9 = arith.index_castui %8 {stream.values = [0 : index]} : i64 to index
%10 = arith.extui %arg6 : i32 to i64
%11 = arith.extui %arg7 : i32 to i64
%c32_i64_1 = arith.constant 32 : i64
%12 = arith.shli %11, %c32_i64_1 : i64
%13 = arith.ori %10, %12 : i64
%14 = arith.index_castui %13 : i64 to index
%c0 = arith.constant 0 : index
%15 = flow.dispatch.workload.ordinal %14, 0 : index
%16 = stream.binding.subspan %arg0[%4] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%15}
%17 = stream.binding.subspan %arg1[%9] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%15}
%18 = flow.dispatch.tensor.load %16, offsets = [0, 0, 0], sizes = [%15, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%15} -> tensor<?x8640x3200xf16>
%19 = tensor.empty(%15) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %18 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %19 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %17, offsets = [0, 0, 0, 0, 0], sizes = [%15, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%15}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%1} => !stream.timepoint
%c0_0 = arith.constant 0 : index
%c0_i64 = arith.constant 0 : i64
%c0_i32 = arith.constant 0 : i32
%c32_i64 = arith.constant 32 : i64
%c0_i64_1 = arith.constant 0 : i64
%c0_i32_2 = arith.constant 0 : i32
%c0_i64_3 = arith.constant 0 : i64
%c0_i32_4 = arith.constant 0 : i32
%c32_i64_5 = arith.constant 32 : i64
%c0_i64_6 = arith.constant 0 : i64
%c0_i32_7 = arith.constant 0 : i32
%3 = arith.index_castui %0 : index to i64
%4 = arith.trunci %3 : i64 to i32
%c32_i64_8 = arith.constant 32 : i64
%5 = arith.shrui %3, %c32_i64_8 : i64
%6 = arith.trunci %5 : i64 to i32
%7 = stream.cmd.execute await(%result_timepoint) => with(%2 as %arg1: !stream.resource<external>{%1}, %result as %arg2: !stream.resource<external>{%1}) {
stream.cmd.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%c0_i32, %c0_i32_2, %c0_i32_4, %c0_i32_7, %4, %6 : i32, i32, i32, i32, i32, i32) {
ro %arg1[%c0_0 for %1] : !stream.resource<external>{%1},
wo %arg2[%c0_0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%8 = stream.timepoint.await %7 => %result : !stream.resource<external>{%1}
%9 = stream.tensor.export %8 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %9 : !hal.buffer_view
}
}
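// iree-stream-pack-dispatch-operands turned every index operand of the dispatch
// into a pair of i32 words: the caller splits the value with index_castui /
// trunci / shrui, and the dispatch reassembles it with extui / shli / ori /
// index_castui (the uses of %c32_i64 above). A self-contained sketch of that
// round trip, with illustrative names only:
func.func @split_and_recombine(%value: index) -> index {
  %c32_i64 = arith.constant 32 : i64
  // Caller side: split the index into low/high 32-bit words.
  %v64 = arith.index_castui %value : index to i64
  %lo = arith.trunci %v64 : i64 to i32
  %hi_bits = arith.shrui %v64, %c32_i64 : i64
  %hi = arith.trunci %hi_bits : i64 to i32
  // Dispatch side: rebuild the original index from the two words.
  %lo64 = arith.extui %lo : i32 to i64
  %hi64 = arith.extui %hi : i32 to i64
  %hi_shifted = arith.shli %hi64, %c32_i64 : i64
  %joined = arith.ori %lo64, %hi_shifted : i64
  %rebuilt = arith.index_castui %joined : i64 to index
  return %rebuilt : index
}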
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c32_i64 = arith.constant 32 : i64
%c0_i32 = arith.constant 0 : i32
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%1} => !stream.timepoint
%3 = arith.index_castui %0 : index to i64
%4 = arith.trunci %3 : i64 to i32
%5 = arith.shrui %3, %c32_i64 : i64
%6 = arith.trunci %5 : i64 to i32
%7 = stream.cmd.execute await(%result_timepoint) => with(%2 as %arg1: !stream.resource<external>{%1}, %result as %arg2: !stream.resource<external>{%1}) {
stream.cmd.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%c0_i32, %c0_i32, %c0_i32, %c0_i32, %4, %6 : i32, i32, i32, i32, i32, i32) {
ro %arg1[%c0 for %1] : !stream.resource<external>{%1},
wo %arg2[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%8 = stream.timepoint.await %7 => %result : !stream.resource<external>{%1}
%9 = stream.tensor.export %8 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %9 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c32_i64 = arith.constant 32 : i64
%c0_i32 = arith.constant 0 : i32
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%1} => !stream.timepoint
%3 = arith.index_castui %0 : index to i64
%4 = arith.trunci %3 : i64 to i32
%5 = arith.shrui %3, %c32_i64 : i64
%6 = arith.trunci %5 : i64 to i32
%7 = stream.cmd.execute await(%result_timepoint) => with(%2 as %arg1: !stream.resource<external>{%1}, %result as %arg2: !stream.resource<external>{%1}) {
stream.cmd.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%c0_i32, %c0_i32, %c0_i32, %c0_i32, %4, %6 : i32, i32, i32, i32, i32, i32) {
ro %arg1[%c0 for %1] : !stream.resource<external>{%1},
wo %arg2[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%8 = stream.timepoint.await %7 => %result : !stream.resource<external>{%1}
%9 = stream.tensor.export %8 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %9 : !hal.buffer_view
}
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c32_i64 = arith.constant 32 : i64
%c0_i32 = arith.constant 0 : i32
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%1} => !stream.timepoint
%3 = arith.index_castui %0 : index to i64
%4 = arith.trunci %3 : i64 to i32
%5 = arith.shrui %3, %c32_i64 : i64
%6 = arith.trunci %5 : i64 to i32
%7 = stream.cmd.execute await(%result_timepoint) => with(%2 as %arg1: !stream.resource<external>{%1}, %result as %arg2: !stream.resource<external>{%1}) {
stream.cmd.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%c0_i32, %c0_i32, %c0_i32, %c0_i32, %4, %6 : i32, i32, i32, i32, i32, i32) {
ro %arg1[%c0 for %1] : !stream.resource<external>{%1},
wo %arg2[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%8 = stream.timepoint.await %7 => %result : !stream.resource<external>{%1}
%9 = stream.tensor.export %8 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %9 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
stream.executable private @pack_dispatch_0 {
stream.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: i32, %arg3: i32, %arg4: i32, %arg5: i32, %arg6: i32, %arg7: i32) {
%c32_i64 = arith.constant 32 : i64
%0 = arith.extui %arg2 : i32 to i64
%1 = arith.extui %arg3 : i32 to i64
%2 = arith.shli %1, %c32_i64 : i64
%3 = arith.ori %0, %2 : i64
%4 = arith.index_castui %3 {stream.values = [0 : index]} : i64 to index
%5 = arith.extui %arg4 : i32 to i64
%6 = arith.extui %arg5 : i32 to i64
%7 = arith.shli %6, %c32_i64 : i64
%8 = arith.ori %5, %7 : i64
%9 = arith.index_castui %8 {stream.values = [0 : index]} : i64 to index
%10 = arith.extui %arg6 : i32 to i64
%11 = arith.extui %arg7 : i32 to i64
%12 = arith.shli %11, %c32_i64 : i64
%13 = arith.ori %10, %12 : i64
%14 = arith.index_castui %13 : i64 to index
%15 = flow.dispatch.workload.ordinal %14, 0 : index
%16 = stream.binding.subspan %arg0[%4] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%15}
%17 = stream.binding.subspan %arg1[%9] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%15}
%18 = flow.dispatch.tensor.load %16, offsets = [0, 0, 0], sizes = [%15, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%15} -> tensor<?x8640x3200xf16>
%19 = tensor.empty(%15) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %18 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %19 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %17, offsets = [0, 0, 0, 0, 0], sizes = [%15, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%15}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c32_i64 = arith.constant 32 : i64
%c0_i32 = arith.constant 0 : i32
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%1} => !stream.timepoint
%3 = arith.index_castui %0 : index to i64
%4 = arith.trunci %3 : i64 to i32
%5 = arith.shrui %3, %c32_i64 : i64
%6 = arith.trunci %5 : i64 to i32
%7 = stream.cmd.execute await(%result_timepoint) => with(%2 as %arg1: !stream.resource<external>{%1}, %result as %arg2: !stream.resource<external>{%1}) {
stream.cmd.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%c0_i32, %c0_i32, %c0_i32, %c0_i32, %4, %6 : i32, i32, i32, i32, i32, i32) {
ro %arg1[%c0 for %1] : !stream.resource<external>{%1},
wo %arg2[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%8 = stream.timepoint.await %7 => %result : !stream.resource<external>{%1}
%9 = stream.tensor.export %8 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %9 : !hal.buffer_view
}
}
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
stream.executable private @pack_dispatch_0 {
stream.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: i32, %arg3: i32, %arg4: i32, %arg5: i32, %arg6: i32, %arg7: i32) {
%c32_i64 = arith.constant 32 : i64
%0 = arith.extui %arg2 : i32 to i64
%1 = arith.extui %arg3 : i32 to i64
%2 = arith.shli %1, %c32_i64 : i64
%3 = arith.ori %0, %2 : i64
%4 = arith.index_castui %3 {stream.values = [0 : index]} : i64 to index
%5 = arith.extui %arg4 : i32 to i64
%6 = arith.extui %arg5 : i32 to i64
%7 = arith.shli %6, %c32_i64 : i64
%8 = arith.ori %5, %7 : i64
%9 = arith.index_castui %8 {stream.values = [0 : index]} : i64 to index
%10 = arith.extui %arg6 : i32 to i64
%11 = arith.extui %arg7 : i32 to i64
%12 = arith.shli %11, %c32_i64 : i64
%13 = arith.ori %10, %12 : i64
%14 = arith.index_castui %13 : i64 to index
%15 = flow.dispatch.workload.ordinal %14, 0 : index
%16 = stream.binding.subspan %arg0[%4] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%15}
%17 = stream.binding.subspan %arg1[%9] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%15}
%18 = flow.dispatch.tensor.load %16, offsets = [0, 0, 0], sizes = [%15, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%15} -> tensor<?x8640x3200xf16>
%19 = tensor.empty(%15) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %18 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %19 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %17, offsets = [0, 0, 0, 0, 0], sizes = [%15, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%15}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c32_i64 = arith.constant 32 : i64
%c0_i32 = arith.constant 0 : i32
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%1} => !stream.timepoint
%3 = arith.index_castui %0 : index to i64
%4 = arith.trunci %3 : i64 to i32
%5 = arith.shrui %3, %c32_i64 : i64
%6 = arith.trunci %5 : i64 to i32
%7 = stream.cmd.execute await(%result_timepoint) => with(%2 as %arg1: !stream.resource<external>{%1}, %result as %arg2: !stream.resource<external>{%1}) {
stream.cmd.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%c0_i32, %c0_i32, %c0_i32, %c0_i32, %4, %6 : i32, i32, i32, i32, i32, i32) {
ro %arg1[%c0 for %1] : !stream.resource<external>{%1},
wo %arg2[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%8 = stream.timepoint.await %7 => %result : !stream.resource<external>{%1}
%9 = stream.tensor.export %8 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %9 : !hal.buffer_view
}
}
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
stream.executable private @pack_dispatch_0 {
stream.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: i32, %arg3: i32, %arg4: i32, %arg5: i32, %arg6: i32, %arg7: i32) {
%c32_i64 = arith.constant 32 : i64
%0 = arith.extui %arg2 : i32 to i64
%1 = arith.extui %arg3 : i32 to i64
%2 = arith.shli %1, %c32_i64 : i64
%3 = arith.ori %0, %2 : i64
%4 = arith.index_castui %3 {stream.values = [0 : index]} : i64 to index
%5 = arith.extui %arg4 : i32 to i64
%6 = arith.extui %arg5 : i32 to i64
%7 = arith.shli %6, %c32_i64 : i64
%8 = arith.ori %5, %7 : i64
%9 = arith.index_castui %8 {stream.values = [0 : index]} : i64 to index
%10 = arith.extui %arg6 : i32 to i64
%11 = arith.extui %arg7 : i32 to i64
%12 = arith.shli %11, %c32_i64 : i64
%13 = arith.ori %10, %12 : i64
%14 = arith.index_castui %13 : i64 to index
%15 = flow.dispatch.workload.ordinal %14, 0 : index
%16 = stream.binding.subspan %arg0[%4] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%15}
%17 = stream.binding.subspan %arg1[%9] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%15}
%18 = flow.dispatch.tensor.load %16, offsets = [0, 0, 0], sizes = [%15, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%15} -> tensor<?x8640x3200xf16>
%19 = tensor.empty(%15) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %18 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %19 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %17, offsets = [0, 0, 0, 0, 0], sizes = [%15, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%15}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c32_i64 = arith.constant 32 : i64
%c0_i32 = arith.constant 0 : i32
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%1} => !stream.timepoint
%3 = arith.index_castui %0 : index to i64
%4 = arith.trunci %3 : i64 to i32
%5 = arith.shrui %3, %c32_i64 : i64
%6 = arith.trunci %5 : i64 to i32
%7 = stream.cmd.execute await(%result_timepoint) => with(%2 as %arg1: !stream.resource<external>{%1}, %result as %arg2: !stream.resource<external>{%1}) {
stream.cmd.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%c0_i32, %c0_i32, %c0_i32, %c0_i32, %4, %6 : i32, i32, i32, i32, i32, i32) {
ro %arg1[%c0 for %1] : !stream.resource<external>{%1},
wo %arg2[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%8 = stream.timepoint.await %7 => %result : !stream.resource<external>{%1}
%9 = stream.tensor.export %8 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %9 : !hal.buffer_view
}
}
// -----// IR Dump After IPO (iree-util-ipo) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
stream.executable private @pack_dispatch_0 {
stream.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: i32, %arg3: i32, %arg4: i32, %arg5: i32, %arg6: i32, %arg7: i32) {
%c32_i64 = arith.constant 32 : i64
%0 = arith.extui %arg2 : i32 to i64
%1 = arith.extui %arg3 : i32 to i64
%2 = arith.shli %1, %c32_i64 : i64
%3 = arith.ori %0, %2 : i64
%4 = arith.index_castui %3 {stream.values = [0 : index]} : i64 to index
%5 = arith.extui %arg4 : i32 to i64
%6 = arith.extui %arg5 : i32 to i64
%7 = arith.shli %6, %c32_i64 : i64
%8 = arith.ori %5, %7 : i64
%9 = arith.index_castui %8 {stream.values = [0 : index]} : i64 to index
%10 = arith.extui %arg6 : i32 to i64
%11 = arith.extui %arg7 : i32 to i64
%12 = arith.shli %11, %c32_i64 : i64
%13 = arith.ori %10, %12 : i64
%14 = arith.index_castui %13 : i64 to index
%15 = flow.dispatch.workload.ordinal %14, 0 : index
%16 = stream.binding.subspan %arg0[%4] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%15}
%17 = stream.binding.subspan %arg1[%9] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%15}
%18 = flow.dispatch.tensor.load %16, offsets = [0, 0, 0], sizes = [%15, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%15} -> tensor<?x8640x3200xf16>
%19 = tensor.empty(%15) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %18 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %19 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %17, offsets = [0, 0, 0, 0, 0], sizes = [%15, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%15}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c32_i64 = arith.constant 32 : i64
%c0_i32 = arith.constant 0 : i32
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%1} => !stream.timepoint
%3 = arith.index_castui %0 : index to i64
%4 = arith.trunci %3 : i64 to i32
%5 = arith.shrui %3, %c32_i64 : i64
%6 = arith.trunci %5 : i64 to i32
%7 = stream.cmd.execute await(%result_timepoint) => with(%2 as %arg1: !stream.resource<external>{%1}, %result as %arg2: !stream.resource<external>{%1}) {
stream.cmd.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%c0_i32, %c0_i32, %c0_i32, %c0_i32, %4, %6 : i32, i32, i32, i32, i32, i32) {
ro %arg1[%c0 for %1] : !stream.resource<external>{%1},
wo %arg2[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%8 = stream.timepoint.await %7 => %result : !stream.resource<external>{%1}
%9 = stream.tensor.export %8 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %9 : !hal.buffer_view
}
}
// -----// IR Dump After FoldUniformOperandsPass (iree-stream-fold-uniform-operands) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
stream.executable private @pack_dispatch_0 {
stream.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: i32, %arg3: i32) {
%c0_i32 = arith.constant 0 : i32
%c32_i64 = arith.constant 32 : i64
%0 = arith.extui %c0_i32 : i32 to i64
%1 = arith.extui %c0_i32 : i32 to i64
%2 = arith.shli %1, %c32_i64 : i64
%3 = arith.ori %0, %2 : i64
%4 = arith.index_castui %3 {stream.values = [0 : index]} : i64 to index
%5 = arith.extui %c0_i32 : i32 to i64
%6 = arith.extui %c0_i32 : i32 to i64
%7 = arith.shli %6, %c32_i64 : i64
%8 = arith.ori %5, %7 : i64
%9 = arith.index_castui %8 {stream.values = [0 : index]} : i64 to index
%10 = arith.extui %arg2 : i32 to i64
%11 = arith.extui %arg3 : i32 to i64
%12 = arith.shli %11, %c32_i64 : i64
%13 = arith.ori %10, %12 : i64
%14 = arith.index_castui %13 : i64 to index
%15 = flow.dispatch.workload.ordinal %14, 0 : index
%16 = stream.binding.subspan %arg0[%4] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%15}
%17 = stream.binding.subspan %arg1[%9] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%15}
%18 = flow.dispatch.tensor.load %16, offsets = [0, 0, 0], sizes = [%15, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%15} -> tensor<?x8640x3200xf16>
%19 = tensor.empty(%15) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %18 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %19 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %17, offsets = [0, 0, 0, 0, 0], sizes = [%15, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%15}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c32_i64 = arith.constant 32 : i64
%c0_i32 = arith.constant 0 : i32
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%1} => !stream.timepoint
%3 = arith.index_castui %0 : index to i64
%4 = arith.trunci %3 : i64 to i32
%5 = arith.shrui %3, %c32_i64 : i64
%6 = arith.trunci %5 : i64 to i32
%7 = stream.cmd.execute await(%result_timepoint) => with(%2 as %arg1: !stream.resource<external>{%1}, %result as %arg2: !stream.resource<external>{%1}) {
stream.cmd.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%4, %6 : i32, i32) {
ro %arg1[%c0 for %1] : !stream.resource<external>{%1},
wo %arg2[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%8 = stream.timepoint.await %7 => %result : !stream.resource<external>{%1}
%9 = stream.tensor.export %8 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %9 : !hal.buffer_view
}
}
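// Note (inferred from the IR above): the host function @pack splits the dynamic batch
// dimension %0 into two i32 push constants (%4 = low 32 bits via arith.trunci, %6 = high
// 32 bits via arith.shrui + arith.trunci); the dispatch recombines them with
// extui/shli/ori before feeding flow.dispatch.workload.ordinal. The resource size
// %1 = %0 * 55296000 comes from 8640 * 3200 f16 elements * 2 bytes per batch element.
// The tensor.pack tiles dim 1 by 16 (8640 -> 540 outer x 16 inner) and dim 2 by 1
// (3200 -> 3200 outer x 1 inner), so under the identity outer_dims_perm an element
// src[b, i, j] lands at packed[b, i floordiv 16, j, i mod 16, 0].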
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c32_i64 = arith.constant 32 : i64
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%1} => !stream.timepoint
%3 = arith.index_castui %0 : index to i64
%4 = arith.trunci %3 : i64 to i32
%5 = arith.shrui %3, %c32_i64 : i64
%6 = arith.trunci %5 : i64 to i32
%7 = stream.cmd.execute await(%result_timepoint) => with(%2 as %arg1: !stream.resource<external>{%1}, %result as %arg2: !stream.resource<external>{%1}) {
stream.cmd.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%4, %6 : i32, i32) {
ro %arg1[%c0 for %1] : !stream.resource<external>{%1},
wo %arg2[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%8 = stream.timepoint.await %7 => %result : !stream.resource<external>{%1}
%9 = stream.tensor.export %8 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %9 : !hal.buffer_view
}
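// Note: the Canonicalizer and CSE dumps immediately around this point print only @pack
// because those passes run nested on individual functions; the IR-printing instrumentation
// (likely --mlir-print-ir-after-all) emits IR at the scope the pass was anchored on, while
// module-level passes print the whole module.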
// -----// IR Dump After CSE (cse) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c32_i64 = arith.constant 32 : i64
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%1} => !stream.timepoint
%3 = arith.index_castui %0 : index to i64
%4 = arith.trunci %3 : i64 to i32
%5 = arith.shrui %3, %c32_i64 : i64
%6 = arith.trunci %5 : i64 to i32
%7 = stream.cmd.execute await(%result_timepoint) => with(%2 as %arg1: !stream.resource<external>{%1}, %result as %arg2: !stream.resource<external>{%1}) {
stream.cmd.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%4, %6 : i32, i32) {
ro %arg1[%c0 for %1] : !stream.resource<external>{%1},
wo %arg2[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%8 = stream.timepoint.await %7 => %result : !stream.resource<external>{%1}
%9 = stream.tensor.export %8 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %9 : !hal.buffer_view
}
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c32_i64 = arith.constant 32 : i64
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%1} => !stream.timepoint
%3 = arith.index_castui %0 : index to i64
%4 = arith.trunci %3 : i64 to i32
%5 = arith.shrui %3, %c32_i64 : i64
%6 = arith.trunci %5 : i64 to i32
%7 = stream.cmd.execute await(%result_timepoint) => with(%2 as %arg1: !stream.resource<external>{%1}, %result as %arg2: !stream.resource<external>{%1}) {
stream.cmd.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%4, %6 : i32, i32) {
ro %arg1[%c0 for %1] : !stream.resource<external>{%1},
wo %arg2[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%8 = stream.timepoint.await %7 => %result : !stream.resource<external>{%1}
%9 = stream.tensor.export %8 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %9 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
stream.executable private @pack_dispatch_0 {
stream.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: i32, %arg3: i32) {
%c32_i64 = arith.constant 32 : i64
%c0 = arith.constant 0 : index
%0 = arith.extui %arg2 : i32 to i64
%1 = arith.extui %arg3 : i32 to i64
%2 = arith.shli %1, %c32_i64 : i64
%3 = arith.ori %0, %2 : i64
%4 = arith.index_castui %3 : i64 to index
%5 = flow.dispatch.workload.ordinal %4, 0 : index
%6 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%5}
%7 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%5}
%8 = flow.dispatch.tensor.load %6, offsets = [0, 0, 0], sizes = [%5, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%5} -> tensor<?x8640x3200xf16>
%9 = tensor.empty(%5) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %8 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %9 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %7, offsets = [0, 0, 0, 0, 0], sizes = [%5, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%5}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c32_i64 = arith.constant 32 : i64
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%1} => !stream.timepoint
%3 = arith.index_castui %0 : index to i64
%4 = arith.trunci %3 : i64 to i32
%5 = arith.shrui %3, %c32_i64 : i64
%6 = arith.trunci %5 : i64 to i32
%7 = stream.cmd.execute await(%result_timepoint) => with(%2 as %arg1: !stream.resource<external>{%1}, %result as %arg2: !stream.resource<external>{%1}) {
stream.cmd.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%4, %6 : i32, i32) {
ro %arg1[%c0 for %1] : !stream.resource<external>{%1},
wo %arg2[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%8 = stream.timepoint.await %7 => %result : !stream.resource<external>{%1}
%9 = stream.tensor.export %8 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %9 : !hal.buffer_view
}
}
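// Note: by this first module-scope dump after the initial one, the constant-zero offset
// words in the dispatch body (the extui/shli/ori chains over %c0_i32 and their
// {stream.values = [0 : index]} index casts) have been folded away, so both
// stream.binding.subspan ops now take the literal %c0 offset and only the two workload
// words (%arg2, %arg3) remain as push constants.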
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
stream.executable private @pack_dispatch_0 {
stream.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: i32, %arg3: i32) {
%c32_i64 = arith.constant 32 : i64
%c0 = arith.constant 0 : index
%0 = arith.extui %arg2 : i32 to i64
%1 = arith.extui %arg3 : i32 to i64
%2 = arith.shli %1, %c32_i64 : i64
%3 = arith.ori %0, %2 : i64
%4 = arith.index_castui %3 : i64 to index
%5 = flow.dispatch.workload.ordinal %4, 0 : index
%6 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%5}
%7 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%5}
%8 = flow.dispatch.tensor.load %6, offsets = [0, 0, 0], sizes = [%5, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%5} -> tensor<?x8640x3200xf16>
%9 = tensor.empty(%5) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %8 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %9 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %7, offsets = [0, 0, 0, 0, 0], sizes = [%5, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%5}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c32_i64 = arith.constant 32 : i64
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%1} => !stream.timepoint
%3 = arith.index_castui %0 : index to i64
%4 = arith.trunci %3 : i64 to i32
%5 = arith.shrui %3, %c32_i64 : i64
%6 = arith.trunci %5 : i64 to i32
%7 = stream.cmd.execute await(%result_timepoint) => with(%2 as %arg1: !stream.resource<external>{%1}, %result as %arg2: !stream.resource<external>{%1}) {
stream.cmd.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%4, %6 : i32, i32) {
ro %arg1[%c0 for %1] : !stream.resource<external>{%1},
wo %arg2[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%8 = stream.timepoint.await %7 => %result : !stream.resource<external>{%1}
%9 = stream.tensor.export %8 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %9 : !hal.buffer_view
}
}
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
stream.executable private @pack_dispatch_0 {
stream.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: i32, %arg3: i32) {
%c32_i64 = arith.constant 32 : i64
%c0 = arith.constant 0 : index
%0 = arith.extui %arg2 : i32 to i64
%1 = arith.extui %arg3 : i32 to i64
%2 = arith.shli %1, %c32_i64 : i64
%3 = arith.ori %0, %2 : i64
%4 = arith.index_castui %3 : i64 to index
%5 = flow.dispatch.workload.ordinal %4, 0 : index
%6 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%5}
%7 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%5}
%8 = flow.dispatch.tensor.load %6, offsets = [0, 0, 0], sizes = [%5, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%5} -> tensor<?x8640x3200xf16>
%9 = tensor.empty(%5) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %8 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %9 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %7, offsets = [0, 0, 0, 0, 0], sizes = [%5, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%5}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c32_i64 = arith.constant 32 : i64
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%1} => !stream.timepoint
%3 = arith.index_castui %0 : index to i64
%4 = arith.trunci %3 : i64 to i32
%5 = arith.shrui %3, %c32_i64 : i64
%6 = arith.trunci %5 : i64 to i32
%7 = stream.cmd.execute await(%result_timepoint) => with(%2 as %arg1: !stream.resource<external>{%1}, %result as %arg2: !stream.resource<external>{%1}) {
stream.cmd.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%4, %6 : i32, i32) {
ro %arg1[%c0 for %1] : !stream.resource<external>{%1},
wo %arg2[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%8 = stream.timepoint.await %7 => %result : !stream.resource<external>{%1}
%9 = stream.tensor.export %8 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %9 : !hal.buffer_view
}
}
// -----// IR Dump After IPO (iree-util-ipo) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
stream.executable private @pack_dispatch_0 {
stream.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: i32, %arg3: i32) {
%c32_i64 = arith.constant 32 : i64
%c0 = arith.constant 0 : index
%0 = arith.extui %arg2 : i32 to i64
%1 = arith.extui %arg3 : i32 to i64
%2 = arith.shli %1, %c32_i64 : i64
%3 = arith.ori %0, %2 : i64
%4 = arith.index_castui %3 : i64 to index
%5 = flow.dispatch.workload.ordinal %4, 0 : index
%6 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%5}
%7 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%5}
%8 = flow.dispatch.tensor.load %6, offsets = [0, 0, 0], sizes = [%5, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%5} -> tensor<?x8640x3200xf16>
%9 = tensor.empty(%5) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %8 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %9 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %7, offsets = [0, 0, 0, 0, 0], sizes = [%5, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%5}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c32_i64 = arith.constant 32 : i64
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%1} => !stream.timepoint
%3 = arith.index_castui %0 : index to i64
%4 = arith.trunci %3 : i64 to i32
%5 = arith.shrui %3, %c32_i64 : i64
%6 = arith.trunci %5 : i64 to i32
%7 = stream.cmd.execute await(%result_timepoint) => with(%2 as %arg1: !stream.resource<external>{%1}, %result as %arg2: !stream.resource<external>{%1}) {
stream.cmd.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%4, %6 : i32, i32) {
ro %arg1[%c0 for %1] : !stream.resource<external>{%1},
wo %arg2[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%8 = stream.timepoint.await %7 => %result : !stream.resource<external>{%1}
%9 = stream.tensor.export %8 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %9 : !hal.buffer_view
}
}
// -----// IR Dump After SymbolDCE (symbol-dce) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
stream.executable private @pack_dispatch_0 {
stream.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: i32, %arg3: i32) {
%c32_i64 = arith.constant 32 : i64
%c0 = arith.constant 0 : index
%0 = arith.extui %arg2 : i32 to i64
%1 = arith.extui %arg3 : i32 to i64
%2 = arith.shli %1, %c32_i64 : i64
%3 = arith.ori %0, %2 : i64
%4 = arith.index_castui %3 : i64 to index
%5 = flow.dispatch.workload.ordinal %4, 0 : index
%6 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%5}
%7 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%5}
%8 = flow.dispatch.tensor.load %6, offsets = [0, 0, 0], sizes = [%5, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%5} -> tensor<?x8640x3200xf16>
%9 = tensor.empty(%5) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %8 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %9 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %7, offsets = [0, 0, 0, 0, 0], sizes = [%5, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%5}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c32_i64 = arith.constant 32 : i64
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%1} => !stream.timepoint
%3 = arith.index_castui %0 : index to i64
%4 = arith.trunci %3 : i64 to i32
%5 = arith.shrui %3, %c32_i64 : i64
%6 = arith.trunci %5 : i64 to i32
%7 = stream.cmd.execute await(%result_timepoint) => with(%2 as %arg1: !stream.resource<external>{%1}, %result as %arg2: !stream.resource<external>{%1}) {
stream.cmd.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%4, %6 : i32, i32) {
ro %arg1[%c0 for %1] : !stream.resource<external>{%1},
wo %arg2[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%8 = stream.timepoint.await %7 => %result : !stream.resource<external>{%1}
%9 = stream.tensor.export %8 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %9 : !hal.buffer_view
}
}
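// Note: FoldGlobals, FuseGlobals, IPO, and SymbolDCE leave the module unchanged here;
// there are no util.global ops to fold or fuse, and the only symbols are the public @pack
// entry point and the dispatch it references, so the four dumps above are identical.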
// -----// IR Dump After CSE (cse) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
stream.executable private @pack_dispatch_0 {
stream.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: i32, %arg3: i32) {
%c32_i64 = arith.constant 32 : i64
%c0 = arith.constant 0 : index
%0 = arith.extui %arg2 : i32 to i64
%1 = arith.extui %arg3 : i32 to i64
%2 = arith.shli %1, %c32_i64 : i64
%3 = arith.ori %0, %2 : i64
%4 = arith.index_castui %3 : i64 to index
%5 = flow.dispatch.workload.ordinal %4, 0 : index
%6 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%5}
%7 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%5}
%8 = flow.dispatch.tensor.load %6, offsets = [0, 0, 0], sizes = [%5, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%5} -> tensor<?x8640x3200xf16>
%9 = tensor.empty(%5) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %8 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %9 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %7, offsets = [0, 0, 0, 0, 0], sizes = [%5, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%5}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c32_i64 = arith.constant 32 : i64
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%1} => !stream.timepoint
%3 = arith.index_castui %0 : index to i64
%4 = arith.trunci %3 : i64 to i32
%5 = arith.shrui %3, %c32_i64 : i64
%6 = arith.trunci %5 : i64 to i32
%7 = stream.cmd.execute await(%result_timepoint) => with(%2 as %arg1: !stream.resource<external>{%1}, %result as %arg2: !stream.resource<external>{%1}) {
stream.cmd.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%4, %6 : i32, i32) {
ro %arg1[%c0 for %1] : !stream.resource<external>{%1},
wo %arg2[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%8 = stream.timepoint.await %7 => %result : !stream.resource<external>{%1}
%9 = stream.tensor.export %8 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %9 : !hal.buffer_view
}
}
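// Note: from this point the same cleanup passes run again (CSE, Canonicalizer,
// SimplifyGlobalAccesses, ApplyPatterns, FoldGlobals, FuseGlobals). The IR is already at a
// fixed point, so each of the remaining dumps repeats the same module until the log breaks
// off partway through the final FuseGlobals dump.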
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
stream.executable private @pack_dispatch_0 {
stream.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: i32, %arg3: i32) {
%c32_i64 = arith.constant 32 : i64
%c0 = arith.constant 0 : index
%0 = arith.extui %arg2 : i32 to i64
%1 = arith.extui %arg3 : i32 to i64
%2 = arith.shli %1, %c32_i64 : i64
%3 = arith.ori %0, %2 : i64
%4 = arith.index_castui %3 : i64 to index
%5 = flow.dispatch.workload.ordinal %4, 0 : index
%6 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%5}
%7 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%5}
%8 = flow.dispatch.tensor.load %6, offsets = [0, 0, 0], sizes = [%5, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%5} -> tensor<?x8640x3200xf16>
%9 = tensor.empty(%5) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %8 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %9 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %7, offsets = [0, 0, 0, 0, 0], sizes = [%5, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%5}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c32_i64 = arith.constant 32 : i64
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%1} => !stream.timepoint
%3 = arith.index_castui %0 : index to i64
%4 = arith.trunci %3 : i64 to i32
%5 = arith.shrui %3, %c32_i64 : i64
%6 = arith.trunci %5 : i64 to i32
%7 = stream.cmd.execute await(%result_timepoint) => with(%2 as %arg1: !stream.resource<external>{%1}, %result as %arg2: !stream.resource<external>{%1}) {
stream.cmd.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%4, %6 : i32, i32) {
ro %arg1[%c0 for %1] : !stream.resource<external>{%1},
wo %arg2[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%8 = stream.timepoint.await %7 => %result : !stream.resource<external>{%1}
%9 = stream.tensor.export %8 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %9 : !hal.buffer_view
}
}
// -----// IR Dump After CSE (cse) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
stream.executable private @pack_dispatch_0 {
stream.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: i32, %arg3: i32) {
%c32_i64 = arith.constant 32 : i64
%c0 = arith.constant 0 : index
%0 = arith.extui %arg2 : i32 to i64
%1 = arith.extui %arg3 : i32 to i64
%2 = arith.shli %1, %c32_i64 : i64
%3 = arith.ori %0, %2 : i64
%4 = arith.index_castui %3 : i64 to index
%5 = flow.dispatch.workload.ordinal %4, 0 : index
%6 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%5}
%7 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%5}
%8 = flow.dispatch.tensor.load %6, offsets = [0, 0, 0], sizes = [%5, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%5} -> tensor<?x8640x3200xf16>
%9 = tensor.empty(%5) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %8 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %9 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %7, offsets = [0, 0, 0, 0, 0], sizes = [%5, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%5}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c32_i64 = arith.constant 32 : i64
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%1} => !stream.timepoint
%3 = arith.index_castui %0 : index to i64
%4 = arith.trunci %3 : i64 to i32
%5 = arith.shrui %3, %c32_i64 : i64
%6 = arith.trunci %5 : i64 to i32
%7 = stream.cmd.execute await(%result_timepoint) => with(%2 as %arg1: !stream.resource<external>{%1}, %result as %arg2: !stream.resource<external>{%1}) {
stream.cmd.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%4, %6 : i32, i32) {
ro %arg1[%c0 for %1] : !stream.resource<external>{%1},
wo %arg2[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%8 = stream.timepoint.await %7 => %result : !stream.resource<external>{%1}
%9 = stream.tensor.export %8 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %9 : !hal.buffer_view
}
}
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c32_i64 = arith.constant 32 : i64
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%1} => !stream.timepoint
%3 = arith.index_castui %0 : index to i64
%4 = arith.trunci %3 : i64 to i32
%5 = arith.shrui %3, %c32_i64 : i64
%6 = arith.trunci %5 : i64 to i32
%7 = stream.cmd.execute await(%result_timepoint) => with(%2 as %arg1: !stream.resource<external>{%1}, %result as %arg2: !stream.resource<external>{%1}) {
stream.cmd.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%4, %6 : i32, i32) {
ro %arg1[%c0 for %1] : !stream.resource<external>{%1},
wo %arg2[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%8 = stream.timepoint.await %7 => %result : !stream.resource<external>{%1}
%9 = stream.tensor.export %8 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %9 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
stream.executable private @pack_dispatch_0 {
stream.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: i32, %arg3: i32) {
%c32_i64 = arith.constant 32 : i64
%c0 = arith.constant 0 : index
%0 = arith.extui %arg2 : i32 to i64
%1 = arith.extui %arg3 : i32 to i64
%2 = arith.shli %1, %c32_i64 : i64
%3 = arith.ori %0, %2 : i64
%4 = arith.index_castui %3 : i64 to index
%5 = flow.dispatch.workload.ordinal %4, 0 : index
%6 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%5}
%7 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%5}
%8 = flow.dispatch.tensor.load %6, offsets = [0, 0, 0], sizes = [%5, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%5} -> tensor<?x8640x3200xf16>
%9 = tensor.empty(%5) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %8 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %9 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %7, offsets = [0, 0, 0, 0, 0], sizes = [%5, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%5}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c32_i64 = arith.constant 32 : i64
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%1} => !stream.timepoint
%3 = arith.index_castui %0 : index to i64
%4 = arith.trunci %3 : i64 to i32
%5 = arith.shrui %3, %c32_i64 : i64
%6 = arith.trunci %5 : i64 to i32
%7 = stream.cmd.execute await(%result_timepoint) => with(%2 as %arg1: !stream.resource<external>{%1}, %result as %arg2: !stream.resource<external>{%1}) {
stream.cmd.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%4, %6 : i32, i32) {
ro %arg1[%c0 for %1] : !stream.resource<external>{%1},
wo %arg2[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%8 = stream.timepoint.await %7 => %result : !stream.resource<external>{%1}
%9 = stream.tensor.export %8 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %9 : !hal.buffer_view
}
}
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
stream.executable private @pack_dispatch_0 {
stream.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: i32, %arg3: i32) {
%c32_i64 = arith.constant 32 : i64
%c0 = arith.constant 0 : index
%0 = arith.extui %arg2 : i32 to i64
%1 = arith.extui %arg3 : i32 to i64
%2 = arith.shli %1, %c32_i64 : i64
%3 = arith.ori %0, %2 : i64
%4 = arith.index_castui %3 : i64 to index
%5 = flow.dispatch.workload.ordinal %4, 0 : index
%6 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%5}
%7 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%5}
%8 = flow.dispatch.tensor.load %6, offsets = [0, 0, 0], sizes = [%5, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%5} -> tensor<?x8640x3200xf16>
%9 = tensor.empty(%5) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %8 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %9 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %7, offsets = [0, 0, 0, 0, 0], sizes = [%5, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%5}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c32_i64 = arith.constant 32 : i64
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%1} => !stream.timepoint
%3 = arith.index_castui %0 : index to i64
%4 = arith.trunci %3 : i64 to i32
%5 = arith.shrui %3, %c32_i64 : i64
%6 = arith.trunci %5 : i64 to i32
%7 = stream.cmd.execute await(%result_timepoint) => with(%2 as %arg1: !stream.resource<external>{%1}, %result as %arg2: !stream.resource<external>{%1}) {
stream.cmd.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%4, %6 : i32, i32) {
ro %arg1[%c0 for %1] : !stream.resource<external>{%1},
wo %arg2[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%8 = stream.timepoint.await %7 => %result : !stream.resource<external>{%1}
%9 = stream.tensor.export %8 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %9 : !hal.buffer_view
}
}
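The host function and the dispatch body above round-trip the dynamic batch dimension through two i32 push constants: arith.trunci/shrui on the host, arith.extui/shli/ori inside the dispatch. A minimal Python sketch of that arithmetic (illustration only, not part of the compiler output; function names are made up):

def split_index(dim: int) -> tuple[int, int]:
    """Host side: split a 64-bit index into (low, high) i32 push constants."""
    lo = dim & 0xFFFFFFFF          # arith.trunci
    hi = (dim >> 32) & 0xFFFFFFFF  # arith.shrui + arith.trunci
    return lo, hi

def join_index(lo: int, hi: int) -> int:
    """Dispatch side: rebuild the index from the two i32 constants."""
    return (lo & 0xFFFFFFFF) | ((hi & 0xFFFFFFFF) << 32)  # extui, shli, ori

assert join_index(*split_index(0x1_2345_6789)) == 0x1_2345_6789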
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
stream.executable private @pack_dispatch_0 {
stream.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: i32, %arg3: i32) {
%c32_i64 = arith.constant 32 : i64
%c0 = arith.constant 0 : index
%0 = arith.extui %arg2 : i32 to i64
%1 = arith.extui %arg3 : i32 to i64
%2 = arith.shli %1, %c32_i64 : i64
%3 = arith.ori %0, %2 : i64
%4 = arith.index_castui %3 : i64 to index
%5 = flow.dispatch.workload.ordinal %4, 0 : index
%6 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%5}
%7 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%5}
%8 = flow.dispatch.tensor.load %6, offsets = [0, 0, 0], sizes = [%5, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%5} -> tensor<?x8640x3200xf16>
%9 = tensor.empty(%5) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %8 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %9 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %7, offsets = [0, 0, 0, 0, 0], sizes = [%5, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%5}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c32_i64 = arith.constant 32 : i64
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%1} => !stream.timepoint
%3 = arith.index_castui %0 : index to i64
%4 = arith.trunci %3 : i64 to i32
%5 = arith.shrui %3, %c32_i64 : i64
%6 = arith.trunci %5 : i64 to i32
%7 = stream.cmd.execute await(%result_timepoint) => with(%2 as %arg1: !stream.resource<external>{%1}, %result as %arg2: !stream.resource<external>{%1}) {
stream.cmd.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%4, %6 : i32, i32) {
ro %arg1[%c0 for %1] : !stream.resource<external>{%1},
wo %arg2[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%8 = stream.timepoint.await %7 => %result : !stream.resource<external>{%1}
%9 = stream.tensor.export %8 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %9 : !hal.buffer_view
}
}
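The %c55296000 constant above is the per-batch-row byte size of the 8640x3200 f16 input, so the external resource size is that constant times the dynamic dimension. A quick arithmetic check (illustration only, not part of the compiler output):

BYTES_F16 = 2
assert 8640 * 3200 * BYTES_F16 == 55_296_000

def external_resource_size(batch: int) -> int:
    # mirrors: %1 = arith.muli %0, %c55296000
    return batch * 55_296_000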
// -----// IR Dump After AssignTargetDevicesPass (iree-hal-assign-target-devices) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
stream.executable private @pack_dispatch_0 {
stream.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: i32, %arg3: i32) {
%c32_i64 = arith.constant 32 : i64
%c0 = arith.constant 0 : index
%0 = arith.extui %arg2 : i32 to i64
%1 = arith.extui %arg3 : i32 to i64
%2 = arith.shli %1, %c32_i64 : i64
%3 = arith.ori %0, %2 : i64
%4 = arith.index_castui %3 : i64 to index
%5 = flow.dispatch.workload.ordinal %4, 0 : index
%6 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%5}
%7 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%5}
%8 = flow.dispatch.tensor.load %6, offsets = [0, 0, 0], sizes = [%5, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%5} -> tensor<?x8640x3200xf16>
%9 = tensor.empty(%5) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %8 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %9 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %7, offsets = [0, 0, 0, 0, 0], sizes = [%5, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%5}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c32_i64 = arith.constant 32 : i64
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%1} => !stream.timepoint
%3 = arith.index_castui %0 : index to i64
%4 = arith.trunci %3 : i64 to i32
%5 = arith.shrui %3, %c32_i64 : i64
%6 = arith.trunci %5 : i64 to i32
%7 = stream.cmd.execute await(%result_timepoint) => with(%2 as %arg1: !stream.resource<external>{%1}, %result as %arg2: !stream.resource<external>{%1}) {
stream.cmd.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%4, %6 : i32, i32) {
ro %arg1[%c0 for %1] : !stream.resource<external>{%1},
wo %arg2[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%8 = stream.timepoint.await %7 => %result : !stream.resource<external>{%1}
%9 = stream.tensor.export %8 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %9 : !hal.buffer_view
}
}
// -----// IR Dump After VerifyTargetEnvironmentPass (iree-hal-verify-target-environment) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
stream.executable private @pack_dispatch_0 {
stream.executable.export public @pack_dispatch_0_pack_f16 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: i32, %arg3: i32) {
%c32_i64 = arith.constant 32 : i64
%c0 = arith.constant 0 : index
%0 = arith.extui %arg2 : i32 to i64
%1 = arith.extui %arg3 : i32 to i64
%2 = arith.shli %1, %c32_i64 : i64
%3 = arith.ori %0, %2 : i64
%4 = arith.index_castui %3 : i64 to index
%5 = flow.dispatch.workload.ordinal %4, 0 : index
%6 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%5}
%7 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%5}
%8 = flow.dispatch.tensor.load %6, offsets = [0, 0, 0], sizes = [%5, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%5} -> tensor<?x8640x3200xf16>
%9 = tensor.empty(%5) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %8 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %9 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %7, offsets = [0, 0, 0, 0, 0], sizes = [%5, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%5}
return
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c32_i64 = arith.constant 32 : i64
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%1} => !stream.timepoint
%3 = arith.index_castui %0 : index to i64
%4 = arith.trunci %3 : i64 to i32
%5 = arith.shrui %3, %c32_i64 : i64
%6 = arith.trunci %5 : i64 to i32
%7 = stream.cmd.execute await(%result_timepoint) => with(%2 as %arg1: !stream.resource<external>{%1}, %result as %arg2: !stream.resource<external>{%1}) {
stream.cmd.dispatch @pack_dispatch_0::@pack_dispatch_0_pack_f16[%0](%4, %6 : i32, i32) {
ro %arg1[%c0 for %1] : !stream.resource<external>{%1},
wo %arg2[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%8 = stream.timepoint.await %7 => %result : !stream.resource<external>{%1}
%9 = stream.tensor.export %8 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %9 : !hal.buffer_view
}
}
// -----// IR Dump After MaterializeInterfacesPass (iree-hal-materialize-interfaces) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#pipeline_layout = #hal.pipeline.layout<push_constants = 2, sets = [<0, bindings = [<0, storage_buffer, ReadOnly>, <1, storage_buffer>]>]>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
hal.executable private @pack_dispatch_0 {
hal.executable.variant public @embedded_elf_x86_64 target(#executable_target_embedded_elf_x86_64_) {
hal.executable.export public @pack_dispatch_0_pack_f16 ordinal(0) layout(#pipeline_layout) attributes {hal.interface.bindings = [#hal.interface.binding<0, 0>, #hal.interface.binding<0, 1>]} {
^bb0(%arg0: !hal.device, %arg1: index):
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg1
hal.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16() {
%c0 = arith.constant 0 : index
%c32_i64 = arith.constant 32 : i64
%0 = hal.interface.constant.load[0] : i32
%1 = hal.interface.constant.load[1] : i32
%2 = arith.extui %0 : i32 to i64
%3 = arith.extui %1 : i32 to i64
%4 = arith.shli %3, %c32_i64 : i64
%5 = arith.ori %2, %4 : i64
%6 = arith.index_castui %5 : i64 to index
%7 = flow.dispatch.workload.ordinal %6, 0 : index
%8 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%7}
%9 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%7}
%10 = flow.dispatch.tensor.load %8, offsets = [0, 0, 0], sizes = [%7, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%7} -> tensor<?x8640x3200xf16>
%11 = tensor.empty(%7) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %10 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %11 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %9, offsets = [0, 0, 0, 0, 0], sizes = [%7, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%7}
return
}
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c32_i64 = arith.constant 32 : i64
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%1} => !stream.timepoint
%3 = arith.index_castui %0 : index to i64
%4 = arith.trunci %3 : i64 to i32
%5 = arith.shrui %3, %c32_i64 : i64
%6 = arith.trunci %5 : i64 to i32
%7 = stream.cmd.execute await(%result_timepoint) => with(%2 as %arg1: !stream.resource<external>{%1}, %result as %arg2: !stream.resource<external>{%1}) {
stream.cmd.dispatch @pack_dispatch_0::@embedded_elf_x86_64::@pack_dispatch_0_pack_f16[%0](%4, %6 : i32, i32) {
ro %arg1[%c0 for %1] : !stream.resource<external>{%1},
wo %arg2[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%8 = stream.timepoint.await %7 => %result : !stream.resource<external>{%1}
%9 = stream.tensor.export %8 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %9 : !hal.buffer_view
}
}
// -----// IR Dump After PruneExecutablesPass (iree-hal-prune-executables) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#pipeline_layout = #hal.pipeline.layout<push_constants = 2, sets = [<0, bindings = [<0, storage_buffer, ReadOnly>, <1, storage_buffer>]>]>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
hal.executable private @pack_dispatch_0 {
hal.executable.variant public @embedded_elf_x86_64 target(#executable_target_embedded_elf_x86_64_) {
hal.executable.export public @pack_dispatch_0_pack_f16 ordinal(0) layout(#pipeline_layout) attributes {hal.interface.bindings = [#hal.interface.binding<0, 0>, #hal.interface.binding<0, 1>]} {
^bb0(%arg0: !hal.device, %arg1: index):
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg1
hal.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16() {
%c0 = arith.constant 0 : index
%c32_i64 = arith.constant 32 : i64
%0 = hal.interface.constant.load[0] : i32
%1 = hal.interface.constant.load[1] : i32
%2 = arith.extui %0 : i32 to i64
%3 = arith.extui %1 : i32 to i64
%4 = arith.shli %3, %c32_i64 : i64
%5 = arith.ori %2, %4 : i64
%6 = arith.index_castui %5 : i64 to index
%7 = flow.dispatch.workload.ordinal %6, 0 : index
%8 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%7}
%9 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%7}
%10 = flow.dispatch.tensor.load %8, offsets = [0, 0, 0], sizes = [%7, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%7} -> tensor<?x8640x3200xf16>
%11 = tensor.empty(%7) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %10 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %11 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %9, offsets = [0, 0, 0, 0, 0], sizes = [%7, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%7}
return
}
}
}
}
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c32_i64 = arith.constant 32 : i64
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%1} => !stream.timepoint
%3 = arith.index_castui %0 : index to i64
%4 = arith.trunci %3 : i64 to i32
%5 = arith.shrui %3, %c32_i64 : i64
%6 = arith.trunci %5 : i64 to i32
%7 = stream.cmd.execute await(%result_timepoint) => with(%2 as %arg1: !stream.resource<external>{%1}, %result as %arg2: !stream.resource<external>{%1}) {
stream.cmd.dispatch @pack_dispatch_0::@embedded_elf_x86_64::@pack_dispatch_0_pack_f16[%0](%4, %6 : i32, i32) {
ro %arg1[%c0 for %1] : !stream.resource<external>{%1},
wo %arg2[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%8 = stream.timepoint.await %7 => %result : !stream.resource<external>{%1}
%9 = stream.tensor.export %8 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %9 : !hal.buffer_view
}
}
// -----// IR Dump After CPUMaterializeUpperBoundTileSize (iree-codegen-cpu-materialize-upper-bound-tile-size) //----- //
util.func public @pack(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @pack(%input0: tensor<?x8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
%c32_i64 = arith.constant 32 : i64
%c55296000 = arith.constant 55296000 : index
%c0 = arith.constant 0 : index
%c3200 = arith.constant 3200 : index
%c8640 = arith.constant 8640 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f16 = hal.element_type<f16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
%1 = arith.muli %0, %c55296000 : index
%2 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x8640x3200xf16>{%0} in !stream.resource<external>{%1}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%1} => !stream.timepoint
%3 = arith.index_castui %0 : index to i64
%4 = arith.trunci %3 : i64 to i32
%5 = arith.shrui %3, %c32_i64 : i64
%6 = arith.trunci %5 : i64 to i32
%7 = stream.cmd.execute await(%result_timepoint) => with(%2 as %arg1: !stream.resource<external>{%1}, %result as %arg2: !stream.resource<external>{%1}) {
stream.cmd.dispatch @pack_dispatch_0::@embedded_elf_x86_64::@pack_dispatch_0_pack_f16[%0](%4, %6 : i32, i32) {
ro %arg1[%c0 for %1] : !stream.resource<external>{%1},
wo %arg2[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%8 = stream.timepoint.await %7 => %result : !stream.resource<external>{%1}
%9 = stream.tensor.export %8 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %9 : !hal.buffer_view
}
// -----// IR Dump After TypePropagation (iree-codegen-type-propagation) //----- //
func.func @pack_dispatch_0_pack_f16() {
%c0 = arith.constant 0 : index
%c32_i64 = arith.constant 32 : i64
%0 = hal.interface.constant.load[0] : i32
%1 = hal.interface.constant.load[1] : i32
%2 = arith.extui %0 : i32 to i64
%3 = arith.extui %1 : i32 to i64
%4 = arith.shli %3, %c32_i64 : i64
%5 = arith.ori %2, %4 : i64
%6 = arith.index_castui %5 : i64 to index
%7 = flow.dispatch.workload.ordinal %6, 0 : index
%8 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%7}
%9 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%7}
%10 = flow.dispatch.tensor.load %8, offsets = [0, 0, 0], sizes = [%7, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%7} -> tensor<?x8640x3200xf16>
%11 = tensor.empty(%7) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %10 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %11 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %9, offsets = [0, 0, 0, 0, 0], sizes = [%7, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%7}
return
}
// -----// IR Dump After BubbleUpOrdinalOps (iree-codegen-bubble-up-ordinal-ops) //----- //
func.func @pack_dispatch_0_pack_f16() {
%c0 = arith.constant 0 : index
%c32_i64 = arith.constant 32 : i64
%0 = hal.interface.constant.load[0] : i32
%1 = hal.interface.constant.load[1] : i32
%2 = arith.extui %0 : i32 to i64
%3 = arith.extui %1 : i32 to i64
%4 = arith.shli %3, %c32_i64 : i64
%5 = arith.ori %2, %4 : i64
%6 = arith.index_castui %5 : i64 to index
%7 = flow.dispatch.workload.ordinal %6, 0 : index
%8 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%7}
%9 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%7}
%10 = flow.dispatch.tensor.load %8, offsets = [0, 0, 0], sizes = [%7, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%7} -> tensor<?x8640x3200xf16>
%11 = tensor.empty(%7) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %10 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %11 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %9, offsets = [0, 0, 0, 0, 0], sizes = [%7, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%7}
return
}
// -----// IR Dump After BufferizeCopyOnlyDispatches (iree-codegen-bufferize-copy-only-dispatches) //----- //
func.func @pack_dispatch_0_pack_f16() {
%c0 = arith.constant 0 : index
%c32_i64 = arith.constant 32 : i64
%0 = hal.interface.constant.load[0] : i32
%1 = hal.interface.constant.load[1] : i32
%2 = arith.extui %0 : i32 to i64
%3 = arith.extui %1 : i32 to i64
%4 = arith.shli %3, %c32_i64 : i64
%5 = arith.ori %2, %4 : i64
%6 = arith.index_castui %5 : i64 to index
%7 = flow.dispatch.workload.ordinal %6, 0 : index
%8 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%7}
%9 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%7}
%10 = flow.dispatch.tensor.load %8, offsets = [0, 0, 0], sizes = [%7, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%7} -> tensor<?x8640x3200xf16>
%11 = tensor.empty(%7) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %10 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %11 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %9, offsets = [0, 0, 0, 0, 0], sizes = [%7, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%7}
return
}
// -----// IR Dump After DecomposeSoftmax (iree-codegen-decompose-softmax) //----- //
func.func @pack_dispatch_0_pack_f16() {
%c0 = arith.constant 0 : index
%c32_i64 = arith.constant 32 : i64
%0 = hal.interface.constant.load[0] : i32
%1 = hal.interface.constant.load[1] : i32
%2 = arith.extui %0 : i32 to i64
%3 = arith.extui %1 : i32 to i64
%4 = arith.shli %3, %c32_i64 : i64
%5 = arith.ori %2, %4 : i64
%6 = arith.index_castui %5 : i64 to index
%7 = flow.dispatch.workload.ordinal %6, 0 : index
%8 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%7}
%9 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%7}
%10 = flow.dispatch.tensor.load %8, offsets = [0, 0, 0], sizes = [%7, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%7} -> tensor<?x8640x3200xf16>
%11 = tensor.empty(%7) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %10 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %11 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %9, offsets = [0, 0, 0, 0, 0], sizes = [%7, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%7}
return
}
// -----// IR Dump After MaterializeUserConfigs (iree-codegen-materialize-user-configs) //----- //
module {
func.func @pack_dispatch_0_pack_f16() {
%c0 = arith.constant 0 : index
%c32_i64 = arith.constant 32 : i64
%0 = hal.interface.constant.load[0] : i32
%1 = hal.interface.constant.load[1] : i32
%2 = arith.extui %0 : i32 to i64
%3 = arith.extui %1 : i32 to i64
%4 = arith.shli %3, %c32_i64 : i64
%5 = arith.ori %2, %4 : i64
%6 = arith.index_castui %5 : i64 to index
%7 = flow.dispatch.workload.ordinal %6, 0 : index
%8 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%7}
%9 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%7}
%10 = flow.dispatch.tensor.load %8, offsets = [0, 0, 0], sizes = [%7, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%7} -> tensor<?x8640x3200xf16>
%11 = tensor.empty(%7) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %10 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %11 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %9, offsets = [0, 0, 0, 0, 0], sizes = [%7, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%7}
return
}
}
// -----// IR Dump After RematerializeParallelOps (iree-codegen-rematerialize-parallel-ops) //----- //
func.func @pack_dispatch_0_pack_f16() {
%c0 = arith.constant 0 : index
%c32_i64 = arith.constant 32 : i64
%0 = hal.interface.constant.load[0] : i32
%1 = hal.interface.constant.load[1] : i32
%2 = arith.extui %0 : i32 to i64
%3 = arith.extui %1 : i32 to i64
%4 = arith.shli %3, %c32_i64 : i64
%5 = arith.ori %2, %4 : i64
%6 = arith.index_castui %5 : i64 to index
%7 = flow.dispatch.workload.ordinal %6, 0 : index
%8 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%7}
%9 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%7}
%10 = flow.dispatch.tensor.load %8, offsets = [0, 0, 0], sizes = [%7, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%7} -> tensor<?x8640x3200xf16>
%11 = tensor.empty(%7) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %10 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %11 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %9, offsets = [0, 0, 0, 0, 0], sizes = [%7, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%7}
return
}
// -----// IR Dump After ExpandF16OpToF32 (iree-llvmcpu-expand-f16-op-to-f32) //----- //
func.func @pack_dispatch_0_pack_f16() {
%c0 = arith.constant 0 : index
%c32_i64 = arith.constant 32 : i64
%0 = hal.interface.constant.load[0] : i32
%1 = hal.interface.constant.load[1] : i32
%2 = arith.extui %0 : i32 to i64
%3 = arith.extui %1 : i32 to i64
%4 = arith.shli %3, %c32_i64 : i64
%5 = arith.ori %2, %4 : i64
%6 = arith.index_castui %5 : i64 to index
%7 = flow.dispatch.workload.ordinal %6, 0 : index
%8 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%7}
%9 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%7}
%10 = flow.dispatch.tensor.load %8, offsets = [0, 0, 0], sizes = [%7, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%7} -> tensor<?x8640x3200xf16>
%11 = tensor.empty(%7) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %10 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %11 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %9, offsets = [0, 0, 0, 0, 0], sizes = [%7, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%7}
return
}
// -----// IR Dump After CPUMaterializeEncoding (iree-codegen-cpu-materialize-encoding) //----- //
func.func @pack_dispatch_0_pack_f16() {
%c0 = arith.constant 0 : index
%c32_i64 = arith.constant 32 : i64
%0 = hal.interface.constant.load[0] : i32
%1 = hal.interface.constant.load[1] : i32
%2 = arith.extui %0 : i32 to i64
%3 = arith.extui %1 : i32 to i64
%4 = arith.shli %3, %c32_i64 : i64
%5 = arith.ori %2, %4 : i64
%6 = arith.index_castui %5 : i64 to index
%7 = flow.dispatch.workload.ordinal %6, 0 : index
%8 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%7}
%9 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%7}
%10 = flow.dispatch.tensor.load %8, offsets = [0, 0, 0], sizes = [%7, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%7} -> tensor<?x8640x3200xf16>
%11 = tensor.empty(%7) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %10 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %11 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %9, offsets = [0, 0, 0, 0, 0], sizes = [%7, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%7}
return
}
// -----// IR Dump After EraseHALDescriptorTypeFromMemRef (iree-codegen-erase-hal-descriptor-type-from-memref) //----- //
func.func @pack_dispatch_0_pack_f16() {
%c0 = arith.constant 0 : index
%c32_i64 = arith.constant 32 : i64
%0 = hal.interface.constant.load[0] : i32
%1 = hal.interface.constant.load[1] : i32
%2 = arith.extui %0 : i32 to i64
%3 = arith.extui %1 : i32 to i64
%4 = arith.shli %3, %c32_i64 : i64
%5 = arith.ori %2, %4 : i64
%6 = arith.index_castui %5 : i64 to index
%7 = flow.dispatch.workload.ordinal %6, 0 : index
%8 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%7}
%9 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%7}
%10 = flow.dispatch.tensor.load %8, offsets = [0, 0, 0], sizes = [%7, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%7} -> tensor<?x8640x3200xf16>
%11 = tensor.empty(%7) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %10 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %11 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %9, offsets = [0, 0, 0, 0, 0], sizes = [%7, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%7}
return
}
// -----// IR Dump After LLVMCPUSelectLoweringStrategy (iree-llvmcpu-select-lowering-strategy) //----- //
module {
func.func @pack_dispatch_0_pack_f16() attributes {translation_info = #iree_codegen.translation_info<CPUDataTiling>} {
%c0 = arith.constant 0 : index
%c32_i64 = arith.constant 32 : i64
%0 = hal.interface.constant.load[0] : i32
%1 = hal.interface.constant.load[1] : i32
%2 = arith.extui %0 : i32 to i64
%3 = arith.extui %1 : i32 to i64
%4 = arith.shli %3, %c32_i64 : i64
%5 = arith.ori %2, %4 : i64
%6 = arith.index_castui %5 : i64 to index
%7 = flow.dispatch.workload.ordinal %6, 0 : index
%8 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%7}
%9 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%7}
%10 = flow.dispatch.tensor.load %8, offsets = [0, 0, 0], sizes = [%7, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%7} -> tensor<?x8640x3200xf16>
%11 = tensor.empty(%7) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %10 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %11 {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 60, 128], [1, 1, 16]]>} : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %9, offsets = [0, 0, 0, 0, 0], sizes = [%7, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%7}
return
}
}
// -----// IR Dump After ConfigureTargetExecutableVariantsPass (iree-hal-configure-target-executable-variants) //----- //
hal.executable.variant public @embedded_elf_x86_64 target(<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>) {
hal.executable.export public @pack_dispatch_0_pack_f16 ordinal(0) layout(#hal.pipeline.layout<push_constants = 2, sets = [<0, bindings = [<0, storage_buffer, ReadOnly>, <1, storage_buffer>]>]>) attributes {hal.interface.bindings = [#hal.interface.binding<0, 0>, #hal.interface.binding<0, 1>]} {
^bb0(%arg0: !hal.device, %arg1: index):
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg1
hal.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16() attributes {translation_info = #iree_codegen.translation_info<CPUDataTiling>} {
%c0 = arith.constant 0 : index
%c32_i64 = arith.constant 32 : i64
%0 = hal.interface.constant.load[0] : i32
%1 = hal.interface.constant.load[1] : i32
%2 = arith.extui %0 : i32 to i64
%3 = arith.extui %1 : i32 to i64
%4 = arith.shli %3, %c32_i64 : i64
%5 = arith.ori %2, %4 : i64
%6 = arith.index_castui %5 : i64 to index
%7 = flow.dispatch.workload.ordinal %6, 0 : index
%8 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%7}
%9 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%7}
%10 = flow.dispatch.tensor.load %8, offsets = [0, 0, 0], sizes = [%7, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%7} -> tensor<?x8640x3200xf16>
%11 = tensor.empty(%7) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %10 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %11 {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 60, 128], [1, 1, 16]]>} : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %9, offsets = [0, 0, 0, 0, 0], sizes = [%7, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%7}
return
}
}
}
// -----// IR Dump After ConfigureExecutablesPass (iree-hal-configure-executables) //----- //
hal.executable private @pack_dispatch_0 {
hal.executable.variant public @embedded_elf_x86_64 target(<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>) {
hal.executable.export public @pack_dispatch_0_pack_f16 ordinal(0) layout(#hal.pipeline.layout<push_constants = 2, sets = [<0, bindings = [<0, storage_buffer, ReadOnly>, <1, storage_buffer>]>]>) attributes {hal.interface.bindings = [#hal.interface.binding<0, 0>, #hal.interface.binding<0, 1>]} {
^bb0(%arg0: !hal.device, %arg1: index):
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg1
hal.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @pack_dispatch_0_pack_f16() attributes {translation_info = #iree_codegen.translation_info<CPUDataTiling>} {
%c0 = arith.constant 0 : index
%c32_i64 = arith.constant 32 : i64
%0 = hal.interface.constant.load[0] : i32
%1 = hal.interface.constant.load[1] : i32
%2 = arith.extui %0 : i32 to i64
%3 = arith.extui %1 : i32 to i64
%4 = arith.shli %3, %c32_i64 : i64
%5 = arith.ori %2, %4 : i64
%6 = arith.index_castui %5 : i64 to index
%7 = flow.dispatch.workload.ordinal %6, 0 : index
%8 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%7}
%9 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%7}
%10 = flow.dispatch.tensor.load %8, offsets = [0, 0, 0], sizes = [%7, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%7} -> tensor<?x8640x3200xf16>
%11 = tensor.empty(%7) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %10 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %11 {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 60, 128], [1, 1, 16]]>} : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %9, offsets = [0, 0, 0, 0, 0], sizes = [%7, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%7}
return
}
}
}
}
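The tensor.pack in this dispatch tiles the 8640 dimension by 16 and the 3200 dimension by 1, producing the ?x540x3200x16x1 result (8640 / 16 = 540). A minimal NumPy sketch of the same layout change (illustration only, not part of the compiler output; the function name and the small test shape are made up):

import numpy as np

def pack_16x1(x: np.ndarray) -> np.ndarray:
    """out[b, o1, o2, i1, i2] == x[b, o1 * 16 + i1, o2 * 1 + i2]."""
    b, d1, d2 = x.shape
    assert d1 % 16 == 0
    y = x.reshape(b, d1 // 16, 16, d2, 1)   # split dim 1 into (d1/16, 16), dim 2 into (d2, 1)
    return y.transpose(0, 1, 3, 2, 4)       # outer dims first, then the 16x1 inner tile

x = np.random.rand(2, 32, 4).astype(np.float16)   # small stand-in for ?x8640x3200
y = pack_16x1(x)
assert y.shape == (2, 2, 4, 16, 1)
assert y[1, 1, 3, 7, 0] == x[1, 1 * 16 + 7, 3]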
// -----// IR Dump After LowerExecutableUsingTransformDialect (iree-codegen-lower-executable-using-transform-dialect) //----- //
module {
func.func @pack_dispatch_0_pack_f16() attributes {translation_info = #iree_codegen.translation_info<CPUDataTiling>} {
%c0 = arith.constant 0 : index
%c32_i64 = arith.constant 32 : i64
%0 = hal.interface.constant.load[0] : i32
%1 = hal.interface.constant.load[1] : i32
%2 = arith.extui %0 : i32 to i64
%3 = arith.extui %1 : i32 to i64
%4 = arith.shli %3, %c32_i64 : i64
%5 = arith.ori %2, %4 : i64
%6 = arith.index_castui %5 : i64 to index
%7 = flow.dispatch.workload.ordinal %6, 0 : index
%8 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%7}
%9 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%7}
%10 = flow.dispatch.tensor.load %8, offsets = [0, 0, 0], sizes = [%7, 8640, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%7} -> tensor<?x8640x3200xf16>
%11 = tensor.empty(%7) : tensor<?x540x3200x16x1xf16>
%pack = tensor.pack %10 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %11 {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 60, 128], [1, 1, 16]]>} : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
flow.dispatch.tensor.store %pack, %9, offsets = [0, 0, 0, 0, 0], sizes = [%7, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%7}
return
}
}
// -----// IR Dump After TileAndDistributeToWorkgroups (iree-codegen-tile-and-distribute-to-workgroups) //----- //
func.func @pack_dispatch_0_pack_f16() attributes {translation_info = #iree_codegen.translation_info<CPUDataTiling>} {
%c960 = arith.constant 960 : index
%c128 = arith.constant 128 : index
%c60 = arith.constant 60 : index
%c3200 = arith.constant 3200 : index
%c540 = arith.constant 540 : index
%c0 = arith.constant 0 : index
%c32_i64 = arith.constant 32 : i64
%0 = hal.interface.constant.load[0] : i32
%1 = hal.interface.constant.load[1] : i32
%2 = arith.extui %0 : i32 to i64
%3 = arith.extui %1 : i32 to i64
%4 = arith.shli %3, %c32_i64 : i64
%5 = arith.ori %2, %4 : i64
%6 = arith.index_castui %5 : i64 to index
%7 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%6}
%8 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6}
%workgroup_id_x = hal.interface.workgroup.id[0] : index
%workgroup_count_x = hal.interface.workgroup.count[0] : index
%workgroup_id_y = hal.interface.workgroup.id[1] : index
%workgroup_count_y = hal.interface.workgroup.count[1] : index
%workgroup_id_z = hal.interface.workgroup.id[2] : index
%workgroup_count_z = hal.interface.workgroup.count[2] : index
%9 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_z]
%10 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_z]
scf.for %arg0 = %9 to %6 step %10 {
%11 = affine.min affine_map<(d0)[s0] -> (-d0 + s0, 64)>(%arg0)[%6]
%12 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_id_y]
%13 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_count_y]
scf.for %arg1 = %12 to %c540 step %13 {
%14 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_id_x]
%15 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_count_x]
scf.for %arg2 = %14 to %c3200 step %15 {
%16 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg1)
%17 = flow.dispatch.tensor.load %7, offsets = [%arg0, %16, %arg2], sizes = [%11, %c960, %c128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%6} -> tensor<?x?x?xf16>
%18 = tensor.empty(%11) : tensor<?x60x128x16x1xf16>
%pack = tensor.pack %17 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %18 {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 60, 128], [1, 1, 16]]>} : tensor<?x?x?xf16> -> tensor<?x60x128x16x1xf16>
%cast = tensor.cast %pack : tensor<?x60x128x16x1xf16> to tensor<?x?x?x16x1xf16>
%19 = arith.extui %0 : i32 to i64
%20 = arith.extui %1 : i32 to i64
%21 = arith.shli %20, %c32_i64 : i64
%22 = arith.ori %19, %21 : i64
%23 = arith.index_castui %22 : i64 to index
flow.dispatch.tensor.store %cast, %8, offsets = [%arg0, %arg1, %arg2, 0, 0], sizes = [%11, %c60, %c128, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x?x?x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%23}
}
}
}
return
}
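After tiling and distribution, each workgroup runs a grid-strided loop nest over 64x60x128 tiles of the (?, 540, 3200) iteration space, with an affine.min trimming the ragged edge of the dynamic dimension. A minimal Python sketch of that loop structure (illustration only, not part of the compiler output; the helper name and example sizes are made up):

def workgroup_tiles(batch, wg_id, wg_count):
    """Yield (z, y, x, z_size) tile origins for one workgroup, mirroring the
    scf.for nest above (workgroup tile sizes 64 x 60 x 128)."""
    for z in range(wg_id[2] * 64, batch, wg_count[2] * 64):
        z_size = min(64, batch - z)                  # the affine.min on the dynamic dim
        for y in range(wg_id[1] * 60, 540, wg_count[1] * 60):
            for x in range(wg_id[0] * 128, 3200, wg_count[0] * 128):
                yield z, y, x, z_size

# A single workgroup (id = (0, 0, 0), count = (1, 1, 1)) visits every tile:
tiles = list(workgroup_tiles(batch=70, wg_id=(0, 0, 0), wg_count=(1, 1, 1)))
assert tiles[0] == (0, 0, 0, 64) and tiles[-1] == (64, 480, 3072, 6)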
// -----// IR Dump After ConvertToDestinationPassingStyle (iree-codegen-convert-to-destination-passing-style) //----- //
func.func @pack_dispatch_0_pack_f16() attributes {translation_info = #iree_codegen.translation_info<CPUDataTiling>} {
%c960 = arith.constant 960 : index
%c128 = arith.constant 128 : index
%c60 = arith.constant 60 : index
%c3200 = arith.constant 3200 : index
%c540 = arith.constant 540 : index
%c0 = arith.constant 0 : index
%c32_i64 = arith.constant 32 : i64
%0 = hal.interface.constant.load[0] : i32
%1 = hal.interface.constant.load[1] : i32
%2 = arith.extui %0 : i32 to i64
%3 = arith.extui %1 : i32 to i64
%4 = arith.shli %3, %c32_i64 : i64
%5 = arith.ori %2, %4 : i64
%6 = arith.index_castui %5 : i64 to index
%7 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%6}
%8 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6}
%workgroup_id_x = hal.interface.workgroup.id[0] : index
%workgroup_count_x = hal.interface.workgroup.count[0] : index
%workgroup_id_y = hal.interface.workgroup.id[1] : index
%workgroup_count_y = hal.interface.workgroup.count[1] : index
%workgroup_id_z = hal.interface.workgroup.id[2] : index
%workgroup_count_z = hal.interface.workgroup.count[2] : index
%9 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_z]
%10 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_z]
scf.for %arg0 = %9 to %6 step %10 {
%11 = affine.min affine_map<(d0)[s0] -> (-d0 + s0, 64)>(%arg0)[%6]
%12 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_id_y]
%13 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_count_y]
scf.for %arg1 = %12 to %c540 step %13 {
%14 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_id_x]
%15 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_count_x]
scf.for %arg2 = %14 to %c3200 step %15 {
%16 = arith.extui %0 : i32 to i64
%17 = arith.extui %1 : i32 to i64
%18 = arith.shli %17, %c32_i64 : i64
%19 = arith.ori %16, %18 : i64
%20 = arith.index_castui %19 : i64 to index
%21 = arith.extui %0 : i32 to i64
%22 = arith.extui %1 : i32 to i64
%23 = arith.shli %22, %c32_i64 : i64
%24 = arith.ori %21, %23 : i64
%25 = arith.index_castui %24 : i64 to index
%26 = affine.min affine_map<(d0)[s0] -> (-d0 + s0, 64)>(%arg0)[%25]
%27 = flow.dispatch.tensor.load %8, offsets = [%arg0, %arg1, %arg2, 0, 0], sizes = [%26, %c60, %c128, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%20} -> tensor<?x?x?x16x1xf16>
%cast = tensor.cast %27 : tensor<?x?x?x16x1xf16> to tensor<?x60x128x16x1xf16>
%28 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg1)
%29 = flow.dispatch.tensor.load %7, offsets = [%arg0, %28, %arg2], sizes = [%11, %c960, %c128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%6} -> tensor<?x?x?xf16>
%pack = tensor.pack %29 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %cast {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 60, 128], [1, 1, 16]]>} : tensor<?x?x?xf16> -> tensor<?x60x128x16x1xf16>
%cast_0 = tensor.cast %pack : tensor<?x60x128x16x1xf16> to tensor<?x?x?x16x1xf16>
%30 = arith.extui %0 : i32 to i64
%31 = arith.extui %1 : i32 to i64
%32 = arith.shli %31, %c32_i64 : i64
%33 = arith.ori %30, %32 : i64
%34 = arith.index_castui %33 : i64 to index
flow.dispatch.tensor.store %cast_0, %8, offsets = [%arg0, %arg1, %arg2, 0, 0], sizes = [%11, %c60, %c128, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x?x?x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%34}
}
}
}
return
}
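// Reader note (illustration, not part of the pass output): destination-passing style
// replaces the tensor.empty init with a load of the destination tile from the
// writeonly binding (%8), so the tensor.pack now writes into the same tile that is
// stored back; the tensor.cast pair only reconciles the dynamically shaped loaded
// tensor<?x?x?x16x1xf16> with the statically known tensor<?x60x128x16x1xf16>.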
// -----// IR Dump After FoldAffineMinInDistributedLoops (iree-codegen-fold-affinemin-in-distributed-loops) //----- //
func.func @pack_dispatch_0_pack_f16() attributes {translation_info = #iree_codegen.translation_info<CPUDataTiling>} {
%c960 = arith.constant 960 : index
%c128 = arith.constant 128 : index
%c60 = arith.constant 60 : index
%c3200 = arith.constant 3200 : index
%c540 = arith.constant 540 : index
%c0 = arith.constant 0 : index
%c32_i64 = arith.constant 32 : i64
%0 = hal.interface.constant.load[0] : i32
%1 = hal.interface.constant.load[1] : i32
%2 = arith.extui %0 : i32 to i64
%3 = arith.extui %1 : i32 to i64
%4 = arith.shli %3, %c32_i64 : i64
%5 = arith.ori %2, %4 : i64
%6 = arith.index_castui %5 : i64 to index
%7 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%6}
%8 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6}
%workgroup_id_x = hal.interface.workgroup.id[0] : index
%workgroup_count_x = hal.interface.workgroup.count[0] : index
%workgroup_id_y = hal.interface.workgroup.id[1] : index
%workgroup_count_y = hal.interface.workgroup.count[1] : index
%workgroup_id_z = hal.interface.workgroup.id[2] : index
%workgroup_count_z = hal.interface.workgroup.count[2] : index
%9 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_z]
%10 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_z]
scf.for %arg0 = %9 to %6 step %10 {
%11 = affine.min affine_map<(d0)[s0] -> (-d0 + s0, 64)>(%arg0)[%6]
%12 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_id_y]
%13 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_count_y]
scf.for %arg1 = %12 to %c540 step %13 {
%14 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_id_x]
%15 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_count_x]
scf.for %arg2 = %14 to %c3200 step %15 {
%16 = arith.extui %0 : i32 to i64
%17 = arith.extui %1 : i32 to i64
%18 = arith.shli %17, %c32_i64 : i64
%19 = arith.ori %16, %18 : i64
%20 = arith.index_castui %19 : i64 to index
%21 = arith.extui %0 : i32 to i64
%22 = arith.extui %1 : i32 to i64
%23 = arith.shli %22, %c32_i64 : i64
%24 = arith.ori %21, %23 : i64
%25 = arith.index_castui %24 : i64 to index
%26 = affine.min affine_map<(d0)[s0] -> (-d0 + s0, 64)>(%arg0)[%25]
%27 = flow.dispatch.tensor.load %8, offsets = [%arg0, %arg1, %arg2, 0, 0], sizes = [%26, %c60, %c128, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%20} -> tensor<?x?x?x16x1xf16>
%cast = tensor.cast %27 : tensor<?x?x?x16x1xf16> to tensor<?x60x128x16x1xf16>
%28 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg1)
%29 = flow.dispatch.tensor.load %7, offsets = [%arg0, %28, %arg2], sizes = [%11, %c960, %c128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%6} -> tensor<?x?x?xf16>
%pack = tensor.pack %29 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %cast {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 60, 128], [1, 1, 16]]>} : tensor<?x?x?xf16> -> tensor<?x60x128x16x1xf16>
%cast_0 = tensor.cast %pack : tensor<?x60x128x16x1xf16> to tensor<?x?x?x16x1xf16>
%30 = arith.extui %0 : i32 to i64
%31 = arith.extui %1 : i32 to i64
%32 = arith.shli %31, %c32_i64 : i64
%33 = arith.ori %30, %32 : i64
%34 = arith.index_castui %33 : i64 to index
flow.dispatch.tensor.store %cast_0, %8, offsets = [%arg0, %arg1, %arg2, 0, 0], sizes = [%11, %c60, %c128, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x?x?x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%34}
}
}
}
return
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
func.func @pack_dispatch_0_pack_f16() attributes {translation_info = #iree_codegen.translation_info<CPUDataTiling>} {
%c3200 = arith.constant 3200 : index
%c540 = arith.constant 540 : index
%c0 = arith.constant 0 : index
%c32_i64 = arith.constant 32 : i64
%0 = hal.interface.constant.load[0] : i32
%1 = hal.interface.constant.load[1] : i32
%2 = arith.extui %0 : i32 to i64
%3 = arith.extui %1 : i32 to i64
%4 = arith.shli %3, %c32_i64 : i64
%5 = arith.ori %2, %4 : i64
%6 = arith.index_castui %5 : i64 to index
%7 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%6}
%8 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6}
%workgroup_id_x = hal.interface.workgroup.id[0] : index
%workgroup_count_x = hal.interface.workgroup.count[0] : index
%workgroup_id_y = hal.interface.workgroup.id[1] : index
%workgroup_count_y = hal.interface.workgroup.count[1] : index
%workgroup_id_z = hal.interface.workgroup.id[2] : index
%workgroup_count_z = hal.interface.workgroup.count[2] : index
%9 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_z]
%10 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_z]
scf.for %arg0 = %9 to %6 step %10 {
%11 = affine.min affine_map<(d0)[s0] -> (-d0 + s0, 64)>(%arg0)[%6]
%12 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_id_y]
%13 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_count_y]
scf.for %arg1 = %12 to %c540 step %13 {
%14 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_id_x]
%15 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_count_x]
scf.for %arg2 = %14 to %c3200 step %15 {
%16 = arith.extui %0 : i32 to i64
%17 = arith.extui %1 : i32 to i64
%18 = arith.shli %17, %c32_i64 : i64
%19 = arith.ori %16, %18 : i64
%20 = arith.index_castui %19 : i64 to index
%21 = affine.min affine_map<(d0)[s0] -> (-d0 + s0, 64)>(%arg0)[%20]
%22 = flow.dispatch.tensor.load %8, offsets = [%arg0, %arg1, %arg2, 0, 0], sizes = [%21, 60, 128, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6} -> tensor<?x60x128x16x1xf16>
%23 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg1)
%24 = flow.dispatch.tensor.load %7, offsets = [%arg0, %23, %arg2], sizes = [%11, 960, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%6} -> tensor<?x960x128xf16>
%pack = tensor.pack %24 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %22 {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 60, 128], [1, 1, 16]]>} : tensor<?x960x128xf16> -> tensor<?x60x128x16x1xf16>
flow.dispatch.tensor.store %pack, %8, offsets = [%arg0, %arg1, %arg2, 0, 0], sizes = [%11, 60, 128, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x60x128x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6}
}
}
}
return
}
// -----// IR Dump After CSE (cse) //----- //
func.func @pack_dispatch_0_pack_f16() attributes {translation_info = #iree_codegen.translation_info<CPUDataTiling>} {
%c3200 = arith.constant 3200 : index
%c540 = arith.constant 540 : index
%c0 = arith.constant 0 : index
%c32_i64 = arith.constant 32 : i64
%0 = hal.interface.constant.load[0] : i32
%1 = hal.interface.constant.load[1] : i32
%2 = arith.extui %0 : i32 to i64
%3 = arith.extui %1 : i32 to i64
%4 = arith.shli %3, %c32_i64 : i64
%5 = arith.ori %2, %4 : i64
%6 = arith.index_castui %5 : i64 to index
%7 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%6}
%8 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6}
%workgroup_id_x = hal.interface.workgroup.id[0] : index
%workgroup_count_x = hal.interface.workgroup.count[0] : index
%workgroup_id_y = hal.interface.workgroup.id[1] : index
%workgroup_count_y = hal.interface.workgroup.count[1] : index
%workgroup_id_z = hal.interface.workgroup.id[2] : index
%workgroup_count_z = hal.interface.workgroup.count[2] : index
%9 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_z]
%10 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_z]
scf.for %arg0 = %9 to %6 step %10 {
%11 = affine.min affine_map<(d0)[s0] -> (-d0 + s0, 64)>(%arg0)[%6]
%12 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_id_y]
%13 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_count_y]
scf.for %arg1 = %12 to %c540 step %13 {
%14 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_id_x]
%15 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_count_x]
scf.for %arg2 = %14 to %c3200 step %15 {
%16 = flow.dispatch.tensor.load %8, offsets = [%arg0, %arg1, %arg2, 0, 0], sizes = [%11, 60, 128, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6} -> tensor<?x60x128x16x1xf16>
%17 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg1)
%18 = flow.dispatch.tensor.load %7, offsets = [%arg0, %17, %arg2], sizes = [%11, 960, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%6} -> tensor<?x960x128xf16>
%pack = tensor.pack %18 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %16 {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 60, 128], [1, 1, 16]]>} : tensor<?x960x128xf16> -> tensor<?x60x128x16x1xf16>
flow.dispatch.tensor.store %pack, %8, offsets = [%arg0, %arg1, %arg2, 0, 0], sizes = [%11, 60, 128, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x60x128x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6}
}
}
}
return
}
// -----// IR Dump After FuseTensorPadWithConsumer (iree-codegen-fuse-tensor-pad-with-consumer) //----- //
func.func @pack_dispatch_0_pack_f16() attributes {translation_info = #iree_codegen.translation_info<CPUDataTiling>} {
%c3200 = arith.constant 3200 : index
%c540 = arith.constant 540 : index
%c0 = arith.constant 0 : index
%c32_i64 = arith.constant 32 : i64
%0 = hal.interface.constant.load[0] : i32
%1 = hal.interface.constant.load[1] : i32
%2 = arith.extui %0 : i32 to i64
%3 = arith.extui %1 : i32 to i64
%4 = arith.shli %3, %c32_i64 : i64
%5 = arith.ori %2, %4 : i64
%6 = arith.index_castui %5 : i64 to index
%7 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%6}
%8 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6}
%workgroup_id_x = hal.interface.workgroup.id[0] : index
%workgroup_count_x = hal.interface.workgroup.count[0] : index
%workgroup_id_y = hal.interface.workgroup.id[1] : index
%workgroup_count_y = hal.interface.workgroup.count[1] : index
%workgroup_id_z = hal.interface.workgroup.id[2] : index
%workgroup_count_z = hal.interface.workgroup.count[2] : index
%9 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_z]
%10 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_z]
scf.for %arg0 = %9 to %6 step %10 {
%11 = affine.min affine_map<(d0)[s0] -> (-d0 + s0, 64)>(%arg0)[%6]
%12 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_id_y]
%13 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_count_y]
scf.for %arg1 = %12 to %c540 step %13 {
%14 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_id_x]
%15 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_count_x]
scf.for %arg2 = %14 to %c3200 step %15 {
%16 = flow.dispatch.tensor.load %8, offsets = [%arg0, %arg1, %arg2, 0, 0], sizes = [%11, 60, 128, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6} -> tensor<?x60x128x16x1xf16>
%17 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg1)
%18 = flow.dispatch.tensor.load %7, offsets = [%arg0, %17, %arg2], sizes = [%11, 960, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%6} -> tensor<?x960x128xf16>
%pack = tensor.pack %18 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %16 {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 60, 128], [1, 1, 16]]>} : tensor<?x960x128xf16> -> tensor<?x60x128x16x1xf16>
flow.dispatch.tensor.store %pack, %8, offsets = [%arg0, %arg1, %arg2, 0, 0], sizes = [%11, 60, 128, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x60x128x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6}
}
}
}
return
}
// -----// IR Dump After ConcretizePadResultShape (iree-codegen-concretize-pad-result-shape) //----- //
func.func @pack_dispatch_0_pack_f16() attributes {translation_info = #iree_codegen.translation_info<CPUDataTiling>} {
%c3200 = arith.constant 3200 : index
%c540 = arith.constant 540 : index
%c0 = arith.constant 0 : index
%c32_i64 = arith.constant 32 : i64
%0 = hal.interface.constant.load[0] : i32
%1 = hal.interface.constant.load[1] : i32
%2 = arith.extui %0 : i32 to i64
%3 = arith.extui %1 : i32 to i64
%4 = arith.shli %3, %c32_i64 : i64
%5 = arith.ori %2, %4 : i64
%6 = arith.index_castui %5 : i64 to index
%7 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%6}
%8 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6}
%workgroup_id_x = hal.interface.workgroup.id[0] : index
%workgroup_count_x = hal.interface.workgroup.count[0] : index
%workgroup_id_y = hal.interface.workgroup.id[1] : index
%workgroup_count_y = hal.interface.workgroup.count[1] : index
%workgroup_id_z = hal.interface.workgroup.id[2] : index
%workgroup_count_z = hal.interface.workgroup.count[2] : index
%9 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_z]
%10 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_z]
scf.for %arg0 = %9 to %6 step %10 {
%11 = affine.min affine_map<(d0)[s0] -> (-d0 + s0, 64)>(%arg0)[%6]
%12 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_id_y]
%13 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_count_y]
scf.for %arg1 = %12 to %c540 step %13 {
%14 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_id_x]
%15 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_count_x]
scf.for %arg2 = %14 to %c3200 step %15 {
%16 = flow.dispatch.tensor.load %8, offsets = [%arg0, %arg1, %arg2, 0, 0], sizes = [%11, 60, 128, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6} -> tensor<?x60x128x16x1xf16>
%17 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg1)
%18 = flow.dispatch.tensor.load %7, offsets = [%arg0, %17, %arg2], sizes = [%11, 960, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%6} -> tensor<?x960x128xf16>
%pack = tensor.pack %18 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %16 {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 60, 128], [1, 1, 16]]>} : tensor<?x960x128xf16> -> tensor<?x60x128x16x1xf16>
flow.dispatch.tensor.store %pack, %8, offsets = [%arg0, %arg1, %arg2, 0, 0], sizes = [%11, 60, 128, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x60x128x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6}
}
}
}
return
}
// -----// IR Dump After LLVMCPUTile (iree-llvmcpu-tile) //----- //
func.func @pack_dispatch_0_pack_f16() attributes {translation_info = #iree_codegen.translation_info<CPUDataTiling>} {
%c16 = arith.constant 16 : index
%c128 = arith.constant 128 : index
%c60 = arith.constant 60 : index
%c1 = arith.constant 1 : index
%c3200 = arith.constant 3200 : index
%c540 = arith.constant 540 : index
%c0 = arith.constant 0 : index
%c32_i64 = arith.constant 32 : i64
%0 = hal.interface.constant.load[0] : i32
%1 = hal.interface.constant.load[1] : i32
%2 = arith.extui %0 : i32 to i64
%3 = arith.extui %1 : i32 to i64
%4 = arith.shli %3, %c32_i64 : i64
%5 = arith.ori %2, %4 : i64
%6 = arith.index_castui %5 : i64 to index
%7 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%6}
%8 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6}
%workgroup_id_x = hal.interface.workgroup.id[0] : index
%workgroup_count_x = hal.interface.workgroup.count[0] : index
%workgroup_id_y = hal.interface.workgroup.id[1] : index
%workgroup_count_y = hal.interface.workgroup.count[1] : index
%workgroup_id_z = hal.interface.workgroup.id[2] : index
%workgroup_count_z = hal.interface.workgroup.count[2] : index
%9 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_z]
%10 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_z]
scf.for %arg0 = %9 to %6 step %10 {
%11 = affine.min affine_map<(d0)[s0] -> (-d0 + s0, 64)>(%arg0)[%6]
%12 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_id_y]
%13 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_count_y]
scf.for %arg1 = %12 to %c540 step %13 {
%14 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_id_x]
%15 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_count_x]
scf.for %arg2 = %14 to %c3200 step %15 {
%16 = flow.dispatch.tensor.load %8, offsets = [%arg0, %arg1, %arg2, 0, 0], sizes = [%11, 60, 128, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6} -> tensor<?x60x128x16x1xf16>
%17 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg1)
%18 = flow.dispatch.tensor.load %7, offsets = [%arg0, %17, %arg2], sizes = [%11, 960, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%6} -> tensor<?x960x128xf16>
%19 = scf.for %arg3 = %c0 to %11 step %c1 iter_args(%arg4 = %16) -> (tensor<?x60x128x16x1xf16>) {
%20 = scf.for %arg5 = %c0 to %c60 step %c1 iter_args(%arg6 = %arg4) -> (tensor<?x60x128x16x1xf16>) {
%21 = scf.for %arg7 = %c0 to %c128 step %c16 iter_args(%arg8 = %arg6) -> (tensor<?x60x128x16x1xf16>) {
%22 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg5)
%extracted_slice = tensor.extract_slice %18[%arg3, %22, %arg7] [1, 16, 16] [1, 1, 1] : tensor<?x960x128xf16> to tensor<1x16x16xf16>
%extracted_slice_0 = tensor.extract_slice %arg8[%arg3, %arg5, %arg7, 0, 0] [1, 1, 16, 16, 1] [1, 1, 1, 1, 1] : tensor<?x60x128x16x1xf16> to tensor<1x1x16x16x1xf16>
%pack = tensor.pack %extracted_slice outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %extracted_slice_0 {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 60, 128], [1, 1, 16]]>} : tensor<1x16x16xf16> -> tensor<1x1x16x16x1xf16>
%inserted_slice = tensor.insert_slice %pack into %arg8[%arg3, %arg5, %arg7, 0, 0] [1, 1, 16, 16, 1] [1, 1, 1, 1, 1] : tensor<1x1x16x16x1xf16> into tensor<?x60x128x16x1xf16>
scf.yield %inserted_slice : tensor<?x60x128x16x1xf16>
}
scf.yield %21 : tensor<?x60x128x16x1xf16>
}
scf.yield %20 : tensor<?x60x128x16x1xf16>
}
flow.dispatch.tensor.store %19, %8, offsets = [%arg0, %arg1, %arg2, 0, 0], sizes = [%11, 60, 128, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x60x128x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6}
}
}
}
return
}
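// Reader note (illustration, not part of the pass output): LLVMCPUTile materializes
// the second-level tile sizes [1, 1, 16] from the lowering_config as three scf.for
// loops, so each innermost iteration packs one 1x16x16 source strip into one
// 1x1x16x16x1 destination tile; per workgroup block that is up to
// 64 * 60 * (128 / 16) = 30720 such 16x16 f16 tiles.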
// -----// IR Dump After DecomposePackUnPackOps (iree-codegen-decompose-pack-unpack-ops) //----- //
func.func @pack_dispatch_0_pack_f16() attributes {translation_info = #iree_codegen.translation_info<CPUDataTiling>} {
%c16 = arith.constant 16 : index
%c128 = arith.constant 128 : index
%c60 = arith.constant 60 : index
%c1 = arith.constant 1 : index
%c3200 = arith.constant 3200 : index
%c540 = arith.constant 540 : index
%c0 = arith.constant 0 : index
%c32_i64 = arith.constant 32 : i64
%0 = hal.interface.constant.load[0] : i32
%1 = hal.interface.constant.load[1] : i32
%2 = arith.extui %0 : i32 to i64
%3 = arith.extui %1 : i32 to i64
%4 = arith.shli %3, %c32_i64 : i64
%5 = arith.ori %2, %4 : i64
%6 = arith.index_castui %5 : i64 to index
%7 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%6}
%8 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6}
%workgroup_id_x = hal.interface.workgroup.id[0] : index
%workgroup_count_x = hal.interface.workgroup.count[0] : index
%workgroup_id_y = hal.interface.workgroup.id[1] : index
%workgroup_count_y = hal.interface.workgroup.count[1] : index
%workgroup_id_z = hal.interface.workgroup.id[2] : index
%workgroup_count_z = hal.interface.workgroup.count[2] : index
%9 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_z]
%10 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_z]
scf.for %arg0 = %9 to %6 step %10 {
%11 = affine.min affine_map<(d0)[s0] -> (-d0 + s0, 64)>(%arg0)[%6]
%12 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_id_y]
%13 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_count_y]
scf.for %arg1 = %12 to %c540 step %13 {
%14 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_id_x]
%15 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_count_x]
scf.for %arg2 = %14 to %c3200 step %15 {
%16 = flow.dispatch.tensor.load %8, offsets = [%arg0, %arg1, %arg2, 0, 0], sizes = [%11, 60, 128, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6} -> tensor<?x60x128x16x1xf16>
%17 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg1)
%18 = flow.dispatch.tensor.load %7, offsets = [%arg0, %17, %arg2], sizes = [%11, 960, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%6} -> tensor<?x960x128xf16>
%19 = scf.for %arg3 = %c0 to %11 step %c1 iter_args(%arg4 = %16) -> (tensor<?x60x128x16x1xf16>) {
%20 = scf.for %arg5 = %c0 to %c60 step %c1 iter_args(%arg6 = %arg4) -> (tensor<?x60x128x16x1xf16>) {
%21 = scf.for %arg7 = %c0 to %c128 step %c16 iter_args(%arg8 = %arg6) -> (tensor<?x60x128x16x1xf16>) {
%22 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg5)
%extracted_slice = tensor.extract_slice %18[%arg3, %22, %arg7] [1, 16, 16] [1, 1, 1] : tensor<?x960x128xf16> to tensor<1x16x16xf16>
%extracted_slice_0 = tensor.extract_slice %arg8[%arg3, %arg5, %arg7, 0, 0] [1, 1, 16, 16, 1] [1, 1, 1, 1, 1] : tensor<?x60x128x16x1xf16> to tensor<1x1x16x16x1xf16>
%expanded = tensor.expand_shape %extracted_slice [[0], [1, 2], [3, 4]] : tensor<1x16x16xf16> into tensor<1x1x16x16x1xf16>
%transposed = linalg.transpose ins(%expanded : tensor<1x1x16x16x1xf16>) outs(%extracted_slice_0 : tensor<1x1x16x16x1xf16>) permutation = [0, 1, 3, 2, 4]
%inserted_slice = tensor.insert_slice %transposed into %arg8[%arg3, %arg5, %arg7, 0, 0] [1, 1, 16, 16, 1] [1, 1, 1, 1, 1] : tensor<1x1x16x16x1xf16> into tensor<?x60x128x16x1xf16>
scf.yield %inserted_slice : tensor<?x60x128x16x1xf16>
}
scf.yield %21 : tensor<?x60x128x16x1xf16>
}
scf.yield %20 : tensor<?x60x128x16x1xf16>
}
flow.dispatch.tensor.store %19, %8, offsets = [%arg0, %arg1, %arg2, 0, 0], sizes = [%11, 60, 128, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x60x128x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6}
}
}
}
return
}
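// Reader note (illustration, not part of the pass output): DecomposePackUnPackOps
// rewrites the 1x16x16 -> 1x1x16x16x1 tensor.pack as tensor.expand_shape followed by
// linalg.transpose with permutation [0, 1, 3, 2, 4]; in index form
//   expanded[n, 0, i, c, 0] = src[n, i, c]  and  packed[n, 0, c, i, 0] = expanded[n, 0, i, c, 0],
// which matches the pack semantics packed[n, 0, c, i, 0] = src[n, i, c].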
// -----// IR Dump After GenericVectorization (iree-codegen-generic-vectorization) //----- //
func.func @pack_dispatch_0_pack_f16() attributes {translation_info = #iree_codegen.translation_info<CPUDataTiling>} {
%cst = arith.constant 0.000000e+00 : f16
%c16 = arith.constant 16 : index
%c128 = arith.constant 128 : index
%c60 = arith.constant 60 : index
%c1 = arith.constant 1 : index
%c3200 = arith.constant 3200 : index
%c540 = arith.constant 540 : index
%c0 = arith.constant 0 : index
%c32_i64 = arith.constant 32 : i64
%0 = hal.interface.constant.load[0] : i32
%1 = hal.interface.constant.load[1] : i32
%2 = arith.extui %0 : i32 to i64
%3 = arith.extui %1 : i32 to i64
%4 = arith.shli %3, %c32_i64 : i64
%5 = arith.ori %2, %4 : i64
%6 = arith.index_castui %5 : i64 to index
%7 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%6}
%8 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6}
%workgroup_id_x = hal.interface.workgroup.id[0] : index
%workgroup_count_x = hal.interface.workgroup.count[0] : index
%workgroup_id_y = hal.interface.workgroup.id[1] : index
%workgroup_count_y = hal.interface.workgroup.count[1] : index
%workgroup_id_z = hal.interface.workgroup.id[2] : index
%workgroup_count_z = hal.interface.workgroup.count[2] : index
%9 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_z]
%10 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_z]
scf.for %arg0 = %9 to %6 step %10 {
%11 = affine.min affine_map<(d0)[s0] -> (-d0 + s0, 64)>(%arg0)[%6]
%12 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_id_y]
%13 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_count_y]
scf.for %arg1 = %12 to %c540 step %13 {
%14 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_id_x]
%15 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_count_x]
scf.for %arg2 = %14 to %c3200 step %15 {
%16 = flow.dispatch.tensor.load %8, offsets = [%arg0, %arg1, %arg2, 0, 0], sizes = [%11, 60, 128, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6} -> tensor<?x60x128x16x1xf16>
%17 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg1)
%18 = flow.dispatch.tensor.load %7, offsets = [%arg0, %17, %arg2], sizes = [%11, 960, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%6} -> tensor<?x960x128xf16>
%19 = scf.for %arg3 = %c0 to %11 step %c1 iter_args(%arg4 = %16) -> (tensor<?x60x128x16x1xf16>) {
%20 = scf.for %arg5 = %c0 to %c60 step %c1 iter_args(%arg6 = %arg4) -> (tensor<?x60x128x16x1xf16>) {
%21 = scf.for %arg7 = %c0 to %c128 step %c16 iter_args(%arg8 = %arg6) -> (tensor<?x60x128x16x1xf16>) {
%22 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg5)
%extracted_slice = tensor.extract_slice %18[%arg3, %22, %arg7] [1, 16, 16] [1, 1, 1] : tensor<?x960x128xf16> to tensor<1x16x16xf16>
%extracted_slice_0 = tensor.extract_slice %arg8[%arg3, %arg5, %arg7, 0, 0] [1, 1, 16, 16, 1] [1, 1, 1, 1, 1] : tensor<?x60x128x16x1xf16> to tensor<1x1x16x16x1xf16>
%expanded = tensor.expand_shape %extracted_slice [[0], [1, 2], [3, 4]] : tensor<1x16x16xf16> into tensor<1x1x16x16x1xf16>
%23 = vector.transfer_read %expanded[%c0, %c0, %c0, %c0, %c0], %cst {in_bounds = [true, true, true, true, true]} : tensor<1x1x16x16x1xf16>, vector<1x1x16x16x1xf16>
%24 = vector.transpose %23, [0, 1, 3, 2, 4] : vector<1x1x16x16x1xf16> to vector<1x1x16x16x1xf16>
%25 = vector.transfer_write %24, %extracted_slice_0[%c0, %c0, %c0, %c0, %c0] {in_bounds = [true, true, true, true, true]} : vector<1x1x16x16x1xf16>, tensor<1x1x16x16x1xf16>
%inserted_slice = tensor.insert_slice %25 into %arg8[%arg3, %arg5, %arg7, 0, 0] [1, 1, 16, 16, 1] [1, 1, 1, 1, 1] : tensor<1x1x16x16x1xf16> into tensor<?x60x128x16x1xf16>
scf.yield %inserted_slice : tensor<?x60x128x16x1xf16>
}
scf.yield %21 : tensor<?x60x128x16x1xf16>
}
scf.yield %20 : tensor<?x60x128x16x1xf16>
}
flow.dispatch.tensor.store %19, %8, offsets = [%arg0, %arg1, %arg2, 0, 0], sizes = [%11, 60, 128, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x60x128x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6}
}
}
}
return
}
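// Reader note (illustration, not part of the pass output): GenericVectorization turns
// the tile copy into a vector.transfer_read of vector<1x1x16x16x1xf16>, a
// vector.transpose of the two 16-wide dims, and an in-bounds vector.transfer_write,
// i.e. each innermost iteration moves one 16x16 f16 tile (16 * 16 * 2 = 512 bytes)
// while transposing it into the packed layout.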
// -----// IR Dump After OptimizeTensorInsertExtractSlices (iree-codegen-optimize-tensor-insert-extract-slices) //----- //
func.func @pack_dispatch_0_pack_f16() attributes {translation_info = #iree_codegen.translation_info<CPUDataTiling>} {
%cst = arith.constant 0.000000e+00 : f16
%c16 = arith.constant 16 : index
%c128 = arith.constant 128 : index
%c60 = arith.constant 60 : index
%c1 = arith.constant 1 : index
%c3200 = arith.constant 3200 : index
%c540 = arith.constant 540 : index
%c0 = arith.constant 0 : index
%c32_i64 = arith.constant 32 : i64
%0 = hal.interface.constant.load[0] : i32
%1 = hal.interface.constant.load[1] : i32
%2 = arith.extui %0 : i32 to i64
%3 = arith.extui %1 : i32 to i64
%4 = arith.shli %3, %c32_i64 : i64
%5 = arith.ori %2, %4 : i64
%6 = arith.index_castui %5 : i64 to index
%7 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%6}
%8 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6}
%workgroup_id_x = hal.interface.workgroup.id[0] : index
%workgroup_count_x = hal.interface.workgroup.count[0] : index
%workgroup_id_y = hal.interface.workgroup.id[1] : index
%workgroup_count_y = hal.interface.workgroup.count[1] : index
%workgroup_id_z = hal.interface.workgroup.id[2] : index
%workgroup_count_z = hal.interface.workgroup.count[2] : index
%9 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_z]
%10 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_z]
%11 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_id_y]
%12 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_count_y]
%13 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_id_x]
%14 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_count_x]
scf.for %arg0 = %9 to %6 step %10 {
%15 = affine.min affine_map<(d0)[s0] -> (-d0 + s0, 64)>(%arg0)[%6]
scf.for %arg1 = %11 to %c540 step %12 {
%16 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg1)
scf.for %arg2 = %13 to %c3200 step %14 {
%17 = flow.dispatch.tensor.load %8, offsets = [%arg0, %arg1, %arg2, 0, 0], sizes = [%15, 60, 128, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6} -> tensor<?x60x128x16x1xf16>
%18 = flow.dispatch.tensor.load %7, offsets = [%arg0, %16, %arg2], sizes = [%15, 960, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%6} -> tensor<?x960x128xf16>
%19 = scf.for %arg3 = %c0 to %15 step %c1 iter_args(%arg4 = %17) -> (tensor<?x60x128x16x1xf16>) {
%20 = scf.for %arg5 = %c0 to %c60 step %c1 iter_args(%arg6 = %arg4) -> (tensor<?x60x128x16x1xf16>) {
%21 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg5)
%22 = scf.for %arg7 = %c0 to %c128 step %c16 iter_args(%arg8 = %arg6) -> (tensor<?x60x128x16x1xf16>) {
%extracted_slice = tensor.extract_slice %18[%arg3, %21, %arg7] [1, 16, 16] [1, 1, 1] : tensor<?x960x128xf16> to tensor<1x16x16xf16>
%expanded = tensor.expand_shape %extracted_slice [[0], [1, 2], [3, 4]] : tensor<1x16x16xf16> into tensor<1x1x16x16x1xf16>
%23 = vector.transfer_read %expanded[%c0, %c0, %c0, %c0, %c0], %cst {in_bounds = [true, true, true, true, true]} : tensor<1x1x16x16x1xf16>, vector<1x1x16x16x1xf16>
%24 = vector.transpose %23, [0, 1, 3, 2, 4] : vector<1x1x16x16x1xf16> to vector<1x1x16x16x1xf16>
%25 = vector.transfer_write %24, %arg8[%arg3, %arg5, %arg7, %c0, %c0] {in_bounds = [true, true, true, true, true]} : vector<1x1x16x16x1xf16>, tensor<?x60x128x16x1xf16>
scf.yield %25 : tensor<?x60x128x16x1xf16>
}
scf.yield %22 : tensor<?x60x128x16x1xf16>
}
scf.yield %20 : tensor<?x60x128x16x1xf16>
}
flow.dispatch.tensor.store %19, %8, offsets = [%arg0, %arg1, %arg2, 0, 0], sizes = [%15, 60, 128, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x60x128x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6}
}
}
}
return
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
func.func @pack_dispatch_0_pack_f16() attributes {translation_info = #iree_codegen.translation_info<CPUDataTiling>} {
%cst = arith.constant 0.000000e+00 : f16
%c16 = arith.constant 16 : index
%c128 = arith.constant 128 : index
%c60 = arith.constant 60 : index
%c1 = arith.constant 1 : index
%c3200 = arith.constant 3200 : index
%c540 = arith.constant 540 : index
%c0 = arith.constant 0 : index
%c32_i64 = arith.constant 32 : i64
%0 = hal.interface.constant.load[0] : i32
%1 = hal.interface.constant.load[1] : i32
%2 = arith.extui %0 : i32 to i64
%3 = arith.extui %1 : i32 to i64
%4 = arith.shli %3, %c32_i64 : i64
%5 = arith.ori %2, %4 : i64
%6 = arith.index_castui %5 : i64 to index
%7 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%6}
%8 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6}
%workgroup_id_x = hal.interface.workgroup.id[0] : index
%workgroup_count_x = hal.interface.workgroup.count[0] : index
%workgroup_id_y = hal.interface.workgroup.id[1] : index
%workgroup_count_y = hal.interface.workgroup.count[1] : index
%workgroup_id_z = hal.interface.workgroup.id[2] : index
%workgroup_count_z = hal.interface.workgroup.count[2] : index
%9 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_z]
%10 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_z]
%11 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_id_y]
%12 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_count_y]
%13 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_id_x]
%14 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_count_x]
scf.for %arg0 = %9 to %6 step %10 {
%15 = affine.min affine_map<(d0)[s0] -> (-d0 + s0, 64)>(%arg0)[%6]
scf.for %arg1 = %11 to %c540 step %12 {
%16 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg1)
scf.for %arg2 = %13 to %c3200 step %14 {
%17 = flow.dispatch.tensor.load %8, offsets = [%arg0, %arg1, %arg2, 0, 0], sizes = [%15, 60, 128, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6} -> tensor<?x60x128x16x1xf16>
%18 = flow.dispatch.tensor.load %7, offsets = [%arg0, %16, %arg2], sizes = [%15, 960, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%6} -> tensor<?x960x128xf16>
%19 = scf.for %arg3 = %c0 to %15 step %c1 iter_args(%arg4 = %17) -> (tensor<?x60x128x16x1xf16>) {
%20 = scf.for %arg5 = %c0 to %c60 step %c1 iter_args(%arg6 = %arg4) -> (tensor<?x60x128x16x1xf16>) {
%21 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg5)
%22 = scf.for %arg7 = %c0 to %c128 step %c16 iter_args(%arg8 = %arg6) -> (tensor<?x60x128x16x1xf16>) {
%extracted_slice = tensor.extract_slice %18[%arg3, %21, %arg7] [1, 16, 16] [1, 1, 1] : tensor<?x960x128xf16> to tensor<1x16x16xf16>
%expanded = tensor.expand_shape %extracted_slice [[0], [1, 2], [3, 4]] : tensor<1x16x16xf16> into tensor<1x1x16x16x1xf16>
%23 = vector.transfer_read %expanded[%c0, %c0, %c0, %c0, %c0], %cst {in_bounds = [true, true, true, true, true]} : tensor<1x1x16x16x1xf16>, vector<1x1x16x16x1xf16>
%24 = vector.transpose %23, [0, 1, 3, 2, 4] : vector<1x1x16x16x1xf16> to vector<1x1x16x16x1xf16>
%25 = vector.transfer_write %24, %arg8[%arg3, %arg5, %arg7, %c0, %c0] {in_bounds = [true, true, true, true, true]} : vector<1x1x16x16x1xf16>, tensor<?x60x128x16x1xf16>
scf.yield %25 : tensor<?x60x128x16x1xf16>
}
scf.yield %22 : tensor<?x60x128x16x1xf16>
}
scf.yield %20 : tensor<?x60x128x16x1xf16>
}
flow.dispatch.tensor.store %19, %8, offsets = [%arg0, %arg1, %arg2, 0, 0], sizes = [%15, 60, 128, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x60x128x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6}
}
}
}
return
}
// -----// IR Dump After CSE (cse) //----- //
func.func @pack_dispatch_0_pack_f16() attributes {translation_info = #iree_codegen.translation_info<CPUDataTiling>} {
%cst = arith.constant 0.000000e+00 : f16
%c16 = arith.constant 16 : index
%c128 = arith.constant 128 : index
%c60 = arith.constant 60 : index
%c1 = arith.constant 1 : index
%c3200 = arith.constant 3200 : index
%c540 = arith.constant 540 : index
%c0 = arith.constant 0 : index
%c32_i64 = arith.constant 32 : i64
%0 = hal.interface.constant.load[0] : i32
%1 = hal.interface.constant.load[1] : i32
%2 = arith.extui %0 : i32 to i64
%3 = arith.extui %1 : i32 to i64
%4 = arith.shli %3, %c32_i64 : i64
%5 = arith.ori %2, %4 : i64
%6 = arith.index_castui %5 : i64 to index
%7 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%6}
%8 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6}
%workgroup_id_x = hal.interface.workgroup.id[0] : index
%workgroup_count_x = hal.interface.workgroup.count[0] : index
%workgroup_id_y = hal.interface.workgroup.id[1] : index
%workgroup_count_y = hal.interface.workgroup.count[1] : index
%workgroup_id_z = hal.interface.workgroup.id[2] : index
%workgroup_count_z = hal.interface.workgroup.count[2] : index
%9 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_z]
%10 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_z]
%11 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_id_y]
%12 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_count_y]
%13 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_id_x]
%14 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_count_x]
scf.for %arg0 = %9 to %6 step %10 {
%15 = affine.min affine_map<(d0)[s0] -> (-d0 + s0, 64)>(%arg0)[%6]
scf.for %arg1 = %11 to %c540 step %12 {
%16 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg1)
scf.for %arg2 = %13 to %c3200 step %14 {
%17 = flow.dispatch.tensor.load %8, offsets = [%arg0, %arg1, %arg2, 0, 0], sizes = [%15, 60, 128, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6} -> tensor<?x60x128x16x1xf16>
%18 = flow.dispatch.tensor.load %7, offsets = [%arg0, %16, %arg2], sizes = [%15, 960, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%6} -> tensor<?x960x128xf16>
%19 = scf.for %arg3 = %c0 to %15 step %c1 iter_args(%arg4 = %17) -> (tensor<?x60x128x16x1xf16>) {
%20 = scf.for %arg5 = %c0 to %c60 step %c1 iter_args(%arg6 = %arg4) -> (tensor<?x60x128x16x1xf16>) {
%21 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg5)
%22 = scf.for %arg7 = %c0 to %c128 step %c16 iter_args(%arg8 = %arg6) -> (tensor<?x60x128x16x1xf16>) {
%extracted_slice = tensor.extract_slice %18[%arg3, %21, %arg7] [1, 16, 16] [1, 1, 1] : tensor<?x960x128xf16> to tensor<1x16x16xf16>
%expanded = tensor.expand_shape %extracted_slice [[0], [1, 2], [3, 4]] : tensor<1x16x16xf16> into tensor<1x1x16x16x1xf16>
%23 = vector.transfer_read %expanded[%c0, %c0, %c0, %c0, %c0], %cst {in_bounds = [true, true, true, true, true]} : tensor<1x1x16x16x1xf16>, vector<1x1x16x16x1xf16>
%24 = vector.transpose %23, [0, 1, 3, 2, 4] : vector<1x1x16x16x1xf16> to vector<1x1x16x16x1xf16>
%25 = vector.transfer_write %24, %arg8[%arg3, %arg5, %arg7, %c0, %c0] {in_bounds = [true, true, true, true, true]} : vector<1x1x16x16x1xf16>, tensor<?x60x128x16x1xf16>
scf.yield %25 : tensor<?x60x128x16x1xf16>
}
scf.yield %22 : tensor<?x60x128x16x1xf16>
}
scf.yield %20 : tensor<?x60x128x16x1xf16>
}
flow.dispatch.tensor.store %19, %8, offsets = [%arg0, %arg1, %arg2, 0, 0], sizes = [%15, 60, 128, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x60x128x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6}
}
}
}
return
}
// -----// IR Dump After EliminateEmptyTensors (iree-eliminate-empty-tensors) //----- //
func.func @pack_dispatch_0_pack_f16() attributes {translation_info = #iree_codegen.translation_info<CPUDataTiling>} {
%cst = arith.constant 0.000000e+00 : f16
%c16 = arith.constant 16 : index
%c128 = arith.constant 128 : index
%c60 = arith.constant 60 : index
%c1 = arith.constant 1 : index
%c3200 = arith.constant 3200 : index
%c540 = arith.constant 540 : index
%c0 = arith.constant 0 : index
%c32_i64 = arith.constant 32 : i64
%0 = hal.interface.constant.load[0] : i32
%1 = hal.interface.constant.load[1] : i32
%2 = arith.extui %0 : i32 to i64
%3 = arith.extui %1 : i32 to i64
%4 = arith.shli %3, %c32_i64 : i64
%5 = arith.ori %2, %4 : i64
%6 = arith.index_castui %5 : i64 to index
%7 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%6}
%8 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6}
%workgroup_id_x = hal.interface.workgroup.id[0] : index
%workgroup_count_x = hal.interface.workgroup.count[0] : index
%workgroup_id_y = hal.interface.workgroup.id[1] : index
%workgroup_count_y = hal.interface.workgroup.count[1] : index
%workgroup_id_z = hal.interface.workgroup.id[2] : index
%workgroup_count_z = hal.interface.workgroup.count[2] : index
%9 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_z]
%10 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_z]
%11 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_id_y]
%12 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_count_y]
%13 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_id_x]
%14 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_count_x]
scf.for %arg0 = %9 to %6 step %10 {
%15 = affine.min affine_map<(d0)[s0] -> (-d0 + s0, 64)>(%arg0)[%6]
scf.for %arg1 = %11 to %c540 step %12 {
%16 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg1)
scf.for %arg2 = %13 to %c3200 step %14 {
%17 = flow.dispatch.tensor.load %8, offsets = [%arg0, %arg1, %arg2, 0, 0], sizes = [%15, 60, 128, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6} -> tensor<?x60x128x16x1xf16>
%18 = flow.dispatch.tensor.load %7, offsets = [%arg0, %16, %arg2], sizes = [%15, 960, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%6} -> tensor<?x960x128xf16>
%19 = scf.for %arg3 = %c0 to %15 step %c1 iter_args(%arg4 = %17) -> (tensor<?x60x128x16x1xf16>) {
%20 = scf.for %arg5 = %c0 to %c60 step %c1 iter_args(%arg6 = %arg4) -> (tensor<?x60x128x16x1xf16>) {
%21 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg5)
%22 = scf.for %arg7 = %c0 to %c128 step %c16 iter_args(%arg8 = %arg6) -> (tensor<?x60x128x16x1xf16>) {
%extracted_slice = tensor.extract_slice %18[%arg3, %21, %arg7] [1, 16, 16] [1, 1, 1] : tensor<?x960x128xf16> to tensor<1x16x16xf16>
%expanded = tensor.expand_shape %extracted_slice [[0], [1, 2], [3, 4]] : tensor<1x16x16xf16> into tensor<1x1x16x16x1xf16>
%23 = vector.transfer_read %expanded[%c0, %c0, %c0, %c0, %c0], %cst {in_bounds = [true, true, true, true, true]} : tensor<1x1x16x16x1xf16>, vector<1x1x16x16x1xf16>
%24 = vector.transpose %23, [0, 1, 3, 2, 4] : vector<1x1x16x16x1xf16> to vector<1x1x16x16x1xf16>
%25 = vector.transfer_write %24, %arg8[%arg3, %arg5, %arg7, %c0, %c0] {in_bounds = [true, true, true, true, true]} : vector<1x1x16x16x1xf16>, tensor<?x60x128x16x1xf16>
scf.yield %25 : tensor<?x60x128x16x1xf16>
}
scf.yield %22 : tensor<?x60x128x16x1xf16>
}
scf.yield %20 : tensor<?x60x128x16x1xf16>
}
flow.dispatch.tensor.store %19, %8, offsets = [%arg0, %arg1, %arg2, 0, 0], sizes = [%15, 60, 128, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x60x128x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6}
}
}
}
return
}
// -----// IR Dump After EmptyTensorToAllocTensor (empty-tensor-to-alloc-tensor) //----- //
func.func @pack_dispatch_0_pack_f16() attributes {translation_info = #iree_codegen.translation_info<CPUDataTiling>} {
%cst = arith.constant 0.000000e+00 : f16
%c16 = arith.constant 16 : index
%c128 = arith.constant 128 : index
%c60 = arith.constant 60 : index
%c1 = arith.constant 1 : index
%c3200 = arith.constant 3200 : index
%c540 = arith.constant 540 : index
%c0 = arith.constant 0 : index
%c32_i64 = arith.constant 32 : i64
%0 = hal.interface.constant.load[0] : i32
%1 = hal.interface.constant.load[1] : i32
%2 = arith.extui %0 : i32 to i64
%3 = arith.extui %1 : i32 to i64
%4 = arith.shli %3, %c32_i64 : i64
%5 = arith.ori %2, %4 : i64
%6 = arith.index_castui %5 : i64 to index
%7 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%6}
%8 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6}
%workgroup_id_x = hal.interface.workgroup.id[0] : index
%workgroup_count_x = hal.interface.workgroup.count[0] : index
%workgroup_id_y = hal.interface.workgroup.id[1] : index
%workgroup_count_y = hal.interface.workgroup.count[1] : index
%workgroup_id_z = hal.interface.workgroup.id[2] : index
%workgroup_count_z = hal.interface.workgroup.count[2] : index
%9 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_z]
%10 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_z]
%11 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_id_y]
%12 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_count_y]
%13 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_id_x]
%14 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_count_x]
scf.for %arg0 = %9 to %6 step %10 {
%15 = affine.min affine_map<(d0)[s0] -> (-d0 + s0, 64)>(%arg0)[%6]
scf.for %arg1 = %11 to %c540 step %12 {
%16 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg1)
scf.for %arg2 = %13 to %c3200 step %14 {
%17 = flow.dispatch.tensor.load %8, offsets = [%arg0, %arg1, %arg2, 0, 0], sizes = [%15, 60, 128, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6} -> tensor<?x60x128x16x1xf16>
%18 = flow.dispatch.tensor.load %7, offsets = [%arg0, %16, %arg2], sizes = [%15, 960, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x8640x3200xf16>>{%6} -> tensor<?x960x128xf16>
%19 = scf.for %arg3 = %c0 to %15 step %c1 iter_args(%arg4 = %17) -> (tensor<?x60x128x16x1xf16>) {
%20 = scf.for %arg5 = %c0 to %c60 step %c1 iter_args(%arg6 = %arg4) -> (tensor<?x60x128x16x1xf16>) {
%21 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg5)
%22 = scf.for %arg7 = %c0 to %c128 step %c16 iter_args(%arg8 = %arg6) -> (tensor<?x60x128x16x1xf16>) {
%extracted_slice = tensor.extract_slice %18[%arg3, %21, %arg7] [1, 16, 16] [1, 1, 1] : tensor<?x960x128xf16> to tensor<1x16x16xf16>
%expanded = tensor.expand_shape %extracted_slice [[0], [1, 2], [3, 4]] : tensor<1x16x16xf16> into tensor<1x1x16x16x1xf16>
%23 = vector.transfer_read %expanded[%c0, %c0, %c0, %c0, %c0], %cst {in_bounds = [true, true, true, true, true]} : tensor<1x1x16x16x1xf16>, vector<1x1x16x16x1xf16>
%24 = vector.transpose %23, [0, 1, 3, 2, 4] : vector<1x1x16x16x1xf16> to vector<1x1x16x16x1xf16>
%25 = vector.transfer_write %24, %arg8[%arg3, %arg5, %arg7, %c0, %c0] {in_bounds = [true, true, true, true, true]} : vector<1x1x16x16x1xf16>, tensor<?x60x128x16x1xf16>
scf.yield %25 : tensor<?x60x128x16x1xf16>
}
scf.yield %22 : tensor<?x60x128x16x1xf16>
}
scf.yield %20 : tensor<?x60x128x16x1xf16>
}
flow.dispatch.tensor.store %19, %8, offsets = [%arg0, %arg1, %arg2, 0, 0], sizes = [%15, 60, 128, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x60x128x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6}
}
}
}
return
}
// -----// IR Dump After IREEComprehensiveBufferize (iree-codegen-iree-comprehensive-bufferize) //----- //
func.func @pack_dispatch_0_pack_f16() attributes {translation_info = #iree_codegen.translation_info<CPUDataTiling>} {
%cst = arith.constant 0.000000e+00 : f16
%c16 = arith.constant 16 : index
%c128 = arith.constant 128 : index
%c60 = arith.constant 60 : index
%c1 = arith.constant 1 : index
%c3200 = arith.constant 3200 : index
%c540 = arith.constant 540 : index
%c0 = arith.constant 0 : index
%c32_i64 = arith.constant 32 : i64
%0 = hal.interface.constant.load[0] : i32
%1 = hal.interface.constant.load[1] : i32
%2 = arith.extui %0 : i32 to i64
%3 = arith.extui %1 : i32 to i64
%4 = arith.shli %3, %c32_i64 : i64
%5 = arith.ori %2, %4 : i64
%6 = arith.index_castui %5 : i64 to index
%7 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<?x8640x3200xf16, #hal.descriptor_type<storage_buffer>>{%6}
memref.assume_alignment %7, 64 : memref<?x8640x3200xf16, #hal.descriptor_type<storage_buffer>>
%8 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>>{%6}
memref.assume_alignment %8, 64 : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>>
%workgroup_id_x = hal.interface.workgroup.id[0] : index
%workgroup_count_x = hal.interface.workgroup.count[0] : index
%workgroup_id_y = hal.interface.workgroup.id[1] : index
%workgroup_count_y = hal.interface.workgroup.count[1] : index
%workgroup_id_z = hal.interface.workgroup.id[2] : index
%workgroup_count_z = hal.interface.workgroup.count[2] : index
%9 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_z]
%10 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_z]
%11 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_id_y]
%12 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_count_y]
%13 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_id_x]
%14 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_count_x]
scf.for %arg0 = %9 to %6 step %10 {
%15 = affine.min affine_map<(d0)[s0] -> (-d0 + s0, 64)>(%arg0)[%6]
scf.for %arg1 = %11 to %c540 step %12 {
%16 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg1)
scf.for %arg2 = %13 to %c3200 step %14 {
%subview = memref.subview %8[%arg0, %arg1, %arg2, 0, 0] [%15, 60, 128, 16, 1] [1, 1, 1, 1, 1] : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>> to memref<?x60x128x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_0 = memref.subview %7[%arg0, %16, %arg2] [%15, 960, 128] [1, 1, 1] : memref<?x8640x3200xf16, #hal.descriptor_type<storage_buffer>> to memref<?x960x128xf16, strided<[27648000, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%17 = scf.for %arg3 = %c0 to %15 step %c1 iter_args(%arg4 = %subview) -> (memref<?x60x128x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>) {
%18 = scf.for %arg5 = %c0 to %c60 step %c1 iter_args(%arg6 = %arg4) -> (memref<?x60x128x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>) {
%19 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg5)
%20 = scf.for %arg7 = %c0 to %c128 step %c16 iter_args(%arg8 = %arg6) -> (memref<?x60x128x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>) {
%subview_2 = memref.subview %subview_0[%arg3, %19, %arg7] [1, 16, 16] [1, 1, 1] : memref<?x960x128xf16, strided<[27648000, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<1x16x16xf16, strided<[27648000, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%expand_shape = memref.expand_shape %subview_2 [[0], [1, 2], [3, 4]] : memref<1x16x16xf16, strided<[27648000, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> into memref<1x1x16x16x1xf16, strided<[27648000, 51200, 3200, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%21 = vector.transfer_read %expand_shape[%c0, %c0, %c0, %c0, %c0], %cst {in_bounds = [true, true, true, true, true]} : memref<1x1x16x16x1xf16, strided<[27648000, 51200, 3200, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<1x1x16x16x1xf16>
%22 = vector.transpose %21, [0, 1, 3, 2, 4] : vector<1x1x16x16x1xf16> to vector<1x1x16x16x1xf16>
vector.transfer_write %22, %arg8[%arg3, %arg5, %arg7, %c0, %c0] {in_bounds = [true, true, true, true, true]} : vector<1x1x16x16x1xf16>, memref<?x60x128x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
scf.yield %arg8 : memref<?x60x128x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
}
scf.yield %20 : memref<?x60x128x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
}
scf.yield %18 : memref<?x60x128x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
}
%subview_1 = memref.subview %8[%arg0, %arg1, %arg2, 0, 0] [%15, 60, 128, 16, 1] [1, 1, 1, 1, 1] : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>> to memref<?x60x128x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%17 : memref<?x60x128x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>) outs(%subview_1 : memref<?x60x128x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>) {
^bb0(%in: f16, %out: f16):
linalg.yield %in : f16
}
}
}
}
return
}
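// Reader note (illustration, not part of the pass output): after bufferization the
// flow.dispatch.tensor.load/store pairs become memref.subview ops on the two bound
// buffers and the vector transfers write directly into the destination subview; the
// trailing linalg.generic copies the loop result into %subview_1, but since the
// scf.for nest yields %subview unchanged the source and destination alias, so later
// cleanup passes can typically fold this copy away.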
// -----// IR Dump After ResolveShapedTypeResultDims (resolve-shaped-type-result-dims) //----- //
func.func @pack_dispatch_0_pack_f16() attributes {translation_info = #iree_codegen.translation_info<CPUDataTiling>} {
%cst = arith.constant 0.000000e+00 : f16
%c16 = arith.constant 16 : index
%c128 = arith.constant 128 : index
%c60 = arith.constant 60 : index
%c1 = arith.constant 1 : index
%c3200 = arith.constant 3200 : index
%c540 = arith.constant 540 : index
%c0 = arith.constant 0 : index
%c32_i64 = arith.constant 32 : i64
%0 = hal.interface.constant.load[0] : i32
%1 = hal.interface.constant.load[1] : i32
%2 = arith.extui %0 : i32 to i64
%3 = arith.extui %1 : i32 to i64
%4 = arith.shli %3, %c32_i64 : i64
%5 = arith.ori %2, %4 : i64
%6 = arith.index_castui %5 : i64 to index
%7 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<?x8640x3200xf16, #hal.descriptor_type<storage_buffer>>{%6}
memref.assume_alignment %7, 64 : memref<?x8640x3200xf16, #hal.descriptor_type<storage_buffer>>
%8 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>>{%6}
memref.assume_alignment %8, 64 : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>>
%workgroup_id_x = hal.interface.workgroup.id[0] : index
%workgroup_count_x = hal.interface.workgroup.count[0] : index
%workgroup_id_y = hal.interface.workgroup.id[1] : index
%workgroup_count_y = hal.interface.workgroup.count[1] : index
%workgroup_id_z = hal.interface.workgroup.id[2] : index
%workgroup_count_z = hal.interface.workgroup.count[2] : index
%9 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_z]
%10 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_z]
%11 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_id_y]
%12 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_count_y]
%13 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_id_x]
%14 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_count_x]
scf.for %arg0 = %9 to %6 step %10 {
%15 = affine.min affine_map<(d0)[s0] -> (-d0 + s0, 64)>(%arg0)[%6]
scf.for %arg1 = %11 to %c540 step %12 {
%16 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg1)
scf.for %arg2 = %13 to %c3200 step %14 {
%subview = memref.subview %8[%arg0, %arg1, %arg2, 0, 0] [%15, 60, 128, 16, 1] [1, 1, 1, 1, 1] : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>> to memref<?x60x128x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_0 = memref.subview %7[%arg0, %16, %arg2] [%15, 960, 128] [1, 1, 1] : memref<?x8640x3200xf16, #hal.descriptor_type<storage_buffer>> to memref<?x960x128xf16, strided<[27648000, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%17 = scf.for %arg3 = %c0 to %15 step %c1 iter_args(%arg4 = %subview) -> (memref<?x60x128x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>) {
%18 = scf.for %arg5 = %c0 to %c60 step %c1 iter_args(%arg6 = %arg4) -> (memref<?x60x128x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>) {
%19 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg5)
%20 = scf.for %arg7 = %c0 to %c128 step %c16 iter_args(%arg8 = %arg6) -> (memref<?x60x128x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>) {
%subview_2 = memref.subview %subview_0[%arg3, %19, %arg7] [1, 16, 16] [1, 1, 1] : memref<?x960x128xf16, strided<[27648000, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<1x16x16xf16, strided<[27648000, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%expand_shape = memref.expand_shape %subview_2 [[0], [1, 2], [3, 4]] : memref<1x16x16xf16, strided<[27648000, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> into memref<1x1x16x16x1xf16, strided<[27648000, 51200, 3200, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%21 = vector.transfer_read %expand_shape[%c0, %c0, %c0, %c0, %c0], %cst {in_bounds = [true, true, true, true, true]} : memref<1x1x16x16x1xf16, strided<[27648000, 51200, 3200, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<1x1x16x16x1xf16>
%22 = vector.transpose %21, [0, 1, 3, 2, 4] : vector<1x1x16x16x1xf16> to vector<1x1x16x16x1xf16>
vector.transfer_write %22, %arg8[%arg3, %arg5, %arg7, %c0, %c0] {in_bounds = [true, true, true, true, true]} : vector<1x1x16x16x1xf16>, memref<?x60x128x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
scf.yield %arg8 : memref<?x60x128x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
}
scf.yield %20 : memref<?x60x128x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
}
scf.yield %18 : memref<?x60x128x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
}
%subview_1 = memref.subview %8[%arg0, %arg1, %arg2, 0, 0] [%15, 60, 128, 16, 1] [1, 1, 1, 1, 1] : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>> to memref<?x60x128x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%17 : memref<?x60x128x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>) outs(%subview_1 : memref<?x60x128x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>) {
^bb0(%in: f16, %out: f16):
linalg.yield %in : f16
}
}
}
}
return
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
func.func @pack_dispatch_0_pack_f16() attributes {translation_info = #iree_codegen.translation_info<CPUDataTiling>} {
%cst = arith.constant 0.000000e+00 : f16
%c16 = arith.constant 16 : index
%c128 = arith.constant 128 : index
%c60 = arith.constant 60 : index
%c1 = arith.constant 1 : index
%c3200 = arith.constant 3200 : index
%c540 = arith.constant 540 : index
%c0 = arith.constant 0 : index
%c32_i64 = arith.constant 32 : i64
%0 = hal.interface.constant.load[0] : i32
%1 = hal.interface.constant.load[1] : i32
%2 = arith.extui %0 : i32 to i64
%3 = arith.extui %1 : i32 to i64
%4 = arith.shli %3, %c32_i64 : i64
%5 = arith.ori %2, %4 : i64
%6 = arith.index_castui %5 : i64 to index
%7 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<?x8640x3200xf16, #hal.descriptor_type<storage_buffer>>{%6}
memref.assume_alignment %7, 64 : memref<?x8640x3200xf16, #hal.descriptor_type<storage_buffer>>
%8 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>>{%6}
memref.assume_alignment %8, 64 : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>>
%workgroup_id_x = hal.interface.workgroup.id[0] : index
%workgroup_count_x = hal.interface.workgroup.count[0] : index
%workgroup_id_y = hal.interface.workgroup.id[1] : index
%workgroup_count_y = hal.interface.workgroup.count[1] : index
%workgroup_id_z = hal.interface.workgroup.id[2] : index
%workgroup_count_z = hal.interface.workgroup.count[2] : index
%9 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_z]
%10 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_z]
%11 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_id_y]
%12 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_count_y]
%13 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_id_x]
%14 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_count_x]
scf.for %arg0 = %9 to %6 step %10 {
%15 = affine.min affine_map<(d0)[s0] -> (-d0 + s0, 64)>(%arg0)[%6]
scf.for %arg1 = %11 to %c540 step %12 {
%16 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg1)
scf.for %arg2 = %13 to %c3200 step %14 {
%subview = memref.subview %8[%arg0, %arg1, %arg2, 0, 0] [%15, 60, 128, 16, 1] [1, 1, 1, 1, 1] : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>> to memref<?x60x128x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_0 = memref.subview %7[%arg0, %16, %arg2] [%15, 960, 128] [1, 1, 1] : memref<?x8640x3200xf16, #hal.descriptor_type<storage_buffer>> to memref<?x960x128xf16, strided<[27648000, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
scf.for %arg3 = %c0 to %15 step %c1 {
scf.for %arg4 = %c0 to %c60 step %c1 {
%17 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg4)
scf.for %arg5 = %c0 to %c128 step %c16 {
%subview_2 = memref.subview %subview_0[%arg3, %17, %arg5] [1, 16, 16] [1, 1, 1] : memref<?x960x128xf16, strided<[27648000, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<1x16x16xf16, strided<[27648000, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%expand_shape = memref.expand_shape %subview_2 [[0], [1, 2], [3, 4]] : memref<1x16x16xf16, strided<[27648000, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> into memref<1x1x16x16x1xf16, strided<[27648000, 51200, 3200, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%18 = vector.transfer_read %expand_shape[%c0, %c0, %c0, %c0, %c0], %cst {in_bounds = [true, true, true, true, true]} : memref<1x1x16x16x1xf16, strided<[27648000, 51200, 3200, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<1x1x16x16x1xf16>
%19 = vector.transpose %18, [0, 1, 3, 2, 4] : vector<1x1x16x16x1xf16> to vector<1x1x16x16x1xf16>
vector.transfer_write %19, %subview[%arg3, %arg4, %arg5, %c0, %c0] {in_bounds = [true, true, true, true, true]} : vector<1x1x16x16x1xf16>, memref<?x60x128x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
}
}
}
%subview_1 = memref.subview %8[%arg0, %arg1, %arg2, 0, 0] [%15, 60, 128, 16, 1] [1, 1, 1, 1, 1] : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>> to memref<?x60x128x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%subview : memref<?x60x128x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>) outs(%subview_1 : memref<?x60x128x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>) {
^bb0(%in: f16, %out: f16):
linalg.yield %in : f16
}
}
}
}
return
}
// -----// IR Dump After CSE (cse) //----- //
func.func @pack_dispatch_0_pack_f16() attributes {translation_info = #iree_codegen.translation_info<CPUDataTiling>} {
%cst = arith.constant 0.000000e+00 : f16
%c16 = arith.constant 16 : index
%c128 = arith.constant 128 : index
%c60 = arith.constant 60 : index
%c1 = arith.constant 1 : index
%c3200 = arith.constant 3200 : index
%c540 = arith.constant 540 : index
%c0 = arith.constant 0 : index
%c32_i64 = arith.constant 32 : i64
%0 = hal.interface.constant.load[0] : i32
%1 = hal.interface.constant.load[1] : i32
%2 = arith.extui %0 : i32 to i64
%3 = arith.extui %1 : i32 to i64
%4 = arith.shli %3, %c32_i64 : i64
%5 = arith.ori %2, %4 : i64
%6 = arith.index_castui %5 : i64 to index
%7 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<?x8640x3200xf16, #hal.descriptor_type<storage_buffer>>{%6}
memref.assume_alignment %7, 64 : memref<?x8640x3200xf16, #hal.descriptor_type<storage_buffer>>
%8 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>>{%6}
memref.assume_alignment %8, 64 : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>>
%workgroup_id_x = hal.interface.workgroup.id[0] : index
%workgroup_count_x = hal.interface.workgroup.count[0] : index
%workgroup_id_y = hal.interface.workgroup.id[1] : index
%workgroup_count_y = hal.interface.workgroup.count[1] : index
%workgroup_id_z = hal.interface.workgroup.id[2] : index
%workgroup_count_z = hal.interface.workgroup.count[2] : index
%9 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_z]
%10 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_z]
%11 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_id_y]
%12 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_count_y]
%13 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_id_x]
%14 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_count_x]
scf.for %arg0 = %9 to %6 step %10 {
%15 = affine.min affine_map<(d0)[s0] -> (-d0 + s0, 64)>(%arg0)[%6]
scf.for %arg1 = %11 to %c540 step %12 {
%16 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg1)
scf.for %arg2 = %13 to %c3200 step %14 {
%subview = memref.subview %8[%arg0, %arg1, %arg2, 0, 0] [%15, 60, 128, 16, 1] [1, 1, 1, 1, 1] : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>> to memref<?x60x128x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_0 = memref.subview %7[%arg0, %16, %arg2] [%15, 960, 128] [1, 1, 1] : memref<?x8640x3200xf16, #hal.descriptor_type<storage_buffer>> to memref<?x960x128xf16, strided<[27648000, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
scf.for %arg3 = %c0 to %15 step %c1 {
scf.for %arg4 = %c0 to %c60 step %c1 {
%17 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg4)
scf.for %arg5 = %c0 to %c128 step %c16 {
%subview_1 = memref.subview %subview_0[%arg3, %17, %arg5] [1, 16, 16] [1, 1, 1] : memref<?x960x128xf16, strided<[27648000, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<1x16x16xf16, strided<[27648000, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%expand_shape = memref.expand_shape %subview_1 [[0], [1, 2], [3, 4]] : memref<1x16x16xf16, strided<[27648000, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> into memref<1x1x16x16x1xf16, strided<[27648000, 51200, 3200, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%18 = vector.transfer_read %expand_shape[%c0, %c0, %c0, %c0, %c0], %cst {in_bounds = [true, true, true, true, true]} : memref<1x1x16x16x1xf16, strided<[27648000, 51200, 3200, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<1x1x16x16x1xf16>
%19 = vector.transpose %18, [0, 1, 3, 2, 4] : vector<1x1x16x16x1xf16> to vector<1x1x16x16x1xf16>
vector.transfer_write %19, %subview[%arg3, %arg4, %arg5, %c0, %c0] {in_bounds = [true, true, true, true, true]} : vector<1x1x16x16x1xf16>, memref<?x60x128x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
}
}
}
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%subview : memref<?x60x128x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>) outs(%subview : memref<?x60x128x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>) {
^bb0(%in: f16, %out: f16):
linalg.yield %in : f16
}
}
}
}
return
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
func.func @pack_dispatch_0_pack_f16() attributes {translation_info = #iree_codegen.translation_info<CPUDataTiling>} {
%cst = arith.constant 0.000000e+00 : f16
%c16 = arith.constant 16 : index
%c128 = arith.constant 128 : index
%c60 = arith.constant 60 : index
%c1 = arith.constant 1 : index
%c3200 = arith.constant 3200 : index
%c540 = arith.constant 540 : index
%c0 = arith.constant 0 : index
%c32_i64 = arith.constant 32 : i64
%0 = hal.interface.constant.load[0] : i32
%1 = hal.interface.constant.load[1] : i32
%2 = arith.extui %0 : i32 to i64
%3 = arith.extui %1 : i32 to i64
%4 = arith.shli %3, %c32_i64 : i64
%5 = arith.ori %2, %4 : i64
%6 = arith.index_castui %5 : i64 to index
%7 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<?x8640x3200xf16, #hal.descriptor_type<storage_buffer>>{%6}
memref.assume_alignment %7, 64 : memref<?x8640x3200xf16, #hal.descriptor_type<storage_buffer>>
%8 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>>{%6}
memref.assume_alignment %8, 64 : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>>
%workgroup_id_x = hal.interface.workgroup.id[0] : index
%workgroup_count_x = hal.interface.workgroup.count[0] : index
%workgroup_id_y = hal.interface.workgroup.id[1] : index
%workgroup_count_y = hal.interface.workgroup.count[1] : index
%workgroup_id_z = hal.interface.workgroup.id[2] : index
%workgroup_count_z = hal.interface.workgroup.count[2] : index
%9 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_z]
%10 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_z]
%11 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_id_y]
%12 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_count_y]
%13 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_id_x]
%14 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_count_x]
scf.for %arg0 = %9 to %6 step %10 {
%15 = affine.min affine_map<(d0)[s0] -> (-d0 + s0, 64)>(%arg0)[%6]
scf.for %arg1 = %11 to %c540 step %12 {
%16 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg1)
scf.for %arg2 = %13 to %c3200 step %14 {
%subview = memref.subview %8[%arg0, %arg1, %arg2, 0, 0] [%15, 60, 128, 16, 1] [1, 1, 1, 1, 1] : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>> to memref<?x60x128x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_0 = memref.subview %7[%arg0, %16, %arg2] [%15, 960, 128] [1, 1, 1] : memref<?x8640x3200xf16, #hal.descriptor_type<storage_buffer>> to memref<?x960x128xf16, strided<[27648000, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
scf.for %arg3 = %c0 to %15 step %c1 {
scf.for %arg4 = %c0 to %c60 step %c1 {
%17 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg4)
scf.for %arg5 = %c0 to %c128 step %c16 {
%subview_1 = memref.subview %subview_0[%arg3, %17, %arg5] [1, 16, 16] [1, 1, 1] : memref<?x960x128xf16, strided<[27648000, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<1x16x16xf16, strided<[27648000, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%expand_shape = memref.expand_shape %subview_1 [[0], [1, 2], [3, 4]] : memref<1x16x16xf16, strided<[27648000, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> into memref<1x1x16x16x1xf16, strided<[27648000, 51200, 3200, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%18 = vector.transfer_read %expand_shape[%c0, %c0, %c0, %c0, %c0], %cst {in_bounds = [true, true, true, true, true]} : memref<1x1x16x16x1xf16, strided<[27648000, 51200, 3200, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<1x1x16x16x1xf16>
%19 = vector.transpose %18, [0, 1, 3, 2, 4] : vector<1x1x16x16x1xf16> to vector<1x1x16x16x1xf16>
vector.transfer_write %19, %subview[%arg3, %arg4, %arg5, %c0, %c0] {in_bounds = [true, true, true, true, true]} : vector<1x1x16x16x1xf16>, memref<?x60x128x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
}
}
}
}
}
}
return
}
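Compared with the CSE output just above, this canonicalization dropped the trailing linalg.generic: after CSE its ins and outs were the same %subview, i.e. an element-wise self-copy, so erasing it (presumably via the identity-generic canonicalization pattern) cannot change the result. A trivial NumPy illustration of that no-op, with an illustrative buffer name:

import numpy as np
buf = np.arange(16, dtype=np.float16)
before = buf.copy()
buf[...] = buf                 # self-copy, like the erased linalg.generic
assert np.array_equal(buf, before)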
// -----// IR Dump After CleanupBufferAllocView (iree-codegen-cleanup-buffer-alloc-view) //----- //
func.func @pack_dispatch_0_pack_f16() attributes {translation_info = #iree_codegen.translation_info<CPUDataTiling>} {
%cst = arith.constant 0.000000e+00 : f16
%c16 = arith.constant 16 : index
%c128 = arith.constant 128 : index
%c60 = arith.constant 60 : index
%c1 = arith.constant 1 : index
%c3200 = arith.constant 3200 : index
%c540 = arith.constant 540 : index
%c0 = arith.constant 0 : index
%c32_i64 = arith.constant 32 : i64
%0 = hal.interface.constant.load[0] : i32
%1 = hal.interface.constant.load[1] : i32
%2 = arith.extui %0 : i32 to i64
%3 = arith.extui %1 : i32 to i64
%4 = arith.shli %3, %c32_i64 : i64
%5 = arith.ori %2, %4 : i64
%6 = arith.index_castui %5 : i64 to index
%7 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<?x8640x3200xf16, #hal.descriptor_type<storage_buffer>>{%6}
memref.assume_alignment %7, 64 : memref<?x8640x3200xf16, #hal.descriptor_type<storage_buffer>>
%8 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>>{%6}
memref.assume_alignment %8, 64 : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>>
%workgroup_id_x = hal.interface.workgroup.id[0] : index
%workgroup_count_x = hal.interface.workgroup.count[0] : index
%workgroup_id_y = hal.interface.workgroup.id[1] : index
%workgroup_count_y = hal.interface.workgroup.count[1] : index
%workgroup_id_z = hal.interface.workgroup.id[2] : index
%workgroup_count_z = hal.interface.workgroup.count[2] : index
%9 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_z]
%10 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_z]
%11 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_id_y]
%12 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_count_y]
%13 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_id_x]
%14 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_count_x]
scf.for %arg0 = %9 to %6 step %10 {
%15 = affine.min affine_map<(d0)[s0] -> (-d0 + s0, 64)>(%arg0)[%6]
scf.for %arg1 = %11 to %c540 step %12 {
%16 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg1)
scf.for %arg2 = %13 to %c3200 step %14 {
%subview = memref.subview %8[%arg0, %arg1, %arg2, 0, 0] [%15, 60, 128, 16, 1] [1, 1, 1, 1, 1] : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>> to memref<?x60x128x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_0 = memref.subview %7[%arg0, %16, %arg2] [%15, 960, 128] [1, 1, 1] : memref<?x8640x3200xf16, #hal.descriptor_type<storage_buffer>> to memref<?x960x128xf16, strided<[27648000, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
scf.for %arg3 = %c0 to %15 step %c1 {
scf.for %arg4 = %c0 to %c60 step %c1 {
%17 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg4)
scf.for %arg5 = %c0 to %c128 step %c16 {
%subview_1 = memref.subview %subview_0[%arg3, %17, %arg5] [1, 16, 16] [1, 1, 1] : memref<?x960x128xf16, strided<[27648000, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<1x16x16xf16, strided<[27648000, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%expand_shape = memref.expand_shape %subview_1 [[0], [1, 2], [3, 4]] : memref<1x16x16xf16, strided<[27648000, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> into memref<1x1x16x16x1xf16, strided<[27648000, 51200, 3200, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%18 = vector.transfer_read %expand_shape[%c0, %c0, %c0, %c0, %c0], %cst {in_bounds = [true, true, true, true, true]} : memref<1x1x16x16x1xf16, strided<[27648000, 51200, 3200, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<1x1x16x16x1xf16>
%19 = vector.transpose %18, [0, 1, 3, 2, 4] : vector<1x1x16x16x1xf16> to vector<1x1x16x16x1xf16>
vector.transfer_write %19, %subview[%arg3, %arg4, %arg5, %c0, %c0] {in_bounds = [true, true, true, true, true]} : vector<1x1x16x16x1xf16>, memref<?x60x128x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
}
}
}
}
}
}
return
}
// -----// IR Dump After LLVMCPUDropVectorUnitDims (iree-llvmcpu-drop-vector-unit-dims) //----- //
func.func @pack_dispatch_0_pack_f16() attributes {translation_info = #iree_codegen.translation_info<CPUDataTiling>} {
%cst = arith.constant 0.000000e+00 : f16
%c16 = arith.constant 16 : index
%c128 = arith.constant 128 : index
%c60 = arith.constant 60 : index
%c1 = arith.constant 1 : index
%c3200 = arith.constant 3200 : index
%c540 = arith.constant 540 : index
%c0 = arith.constant 0 : index
%c32_i64 = arith.constant 32 : i64
%0 = hal.interface.constant.load[0] : i32
%1 = hal.interface.constant.load[1] : i32
%2 = arith.extui %0 : i32 to i64
%3 = arith.extui %1 : i32 to i64
%4 = arith.shli %3, %c32_i64 : i64
%5 = arith.ori %2, %4 : i64
%6 = arith.index_castui %5 : i64 to index
%7 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<?x8640x3200xf16, #hal.descriptor_type<storage_buffer>>{%6}
memref.assume_alignment %7, 64 : memref<?x8640x3200xf16, #hal.descriptor_type<storage_buffer>>
%8 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>>{%6}
memref.assume_alignment %8, 64 : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>>
%workgroup_id_x = hal.interface.workgroup.id[0] : index
%workgroup_count_x = hal.interface.workgroup.count[0] : index
%workgroup_id_y = hal.interface.workgroup.id[1] : index
%workgroup_count_y = hal.interface.workgroup.count[1] : index
%workgroup_id_z = hal.interface.workgroup.id[2] : index
%workgroup_count_z = hal.interface.workgroup.count[2] : index
%9 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_z]
%10 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_z]
%11 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_id_y]
%12 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_count_y]
%13 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_id_x]
%14 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_count_x]
scf.for %arg0 = %9 to %6 step %10 {
%15 = affine.min affine_map<(d0)[s0] -> (-d0 + s0, 64)>(%arg0)[%6]
scf.for %arg1 = %11 to %c540 step %12 {
%16 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg1)
scf.for %arg2 = %13 to %c3200 step %14 {
%subview = memref.subview %8[%arg0, %arg1, %arg2, 0, 0] [%15, 60, 128, 16, 1] [1, 1, 1, 1, 1] : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>> to memref<?x60x128x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_0 = memref.subview %7[%arg0, %16, %arg2] [%15, 960, 128] [1, 1, 1] : memref<?x8640x3200xf16, #hal.descriptor_type<storage_buffer>> to memref<?x960x128xf16, strided<[27648000, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
scf.for %arg3 = %c0 to %15 step %c1 {
scf.for %arg4 = %c0 to %c60 step %c1 {
%17 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg4)
scf.for %arg5 = %c0 to %c128 step %c16 {
%subview_1 = memref.subview %subview_0[%arg3, %17, %arg5] [1, 16, 16] [1, 1, 1] : memref<?x960x128xf16, strided<[27648000, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<1x16x16xf16, strided<[27648000, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%expand_shape = memref.expand_shape %subview_1 [[0], [1, 2], [3, 4]] : memref<1x16x16xf16, strided<[27648000, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> into memref<1x1x16x16x1xf16, strided<[27648000, 51200, 3200, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_2 = memref.subview %expand_shape[0, 0, 0, 0, 0] [1, 1, 16, 16, 1] [1, 1, 1, 1, 1] : memref<1x1x16x16x1xf16, strided<[27648000, 51200, 3200, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<1x1x16x16xf16, strided<[27648000, 51200, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_3 = memref.subview %subview_2[0, 0, 0, 0] [1, 1, 16, 16] [1, 1, 1, 1] : memref<1x1x16x16xf16, strided<[27648000, 51200, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>
%18 = vector.transfer_read %subview_3[%c0, %c0], %cst {in_bounds = [true, true]} : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16x16xf16>
%19 = vector.shape_cast %18 : vector<16x16xf16> to vector<16x16x1xf16>
%20 = vector.broadcast %19 : vector<16x16x1xf16> to vector<1x1x16x16x1xf16>
%21 = vector.transpose %20, [0, 1, 3, 2, 4] : vector<1x1x16x16x1xf16> to vector<1x1x16x16x1xf16>
%22 = vector.extract %21[0, 0] : vector<16x16x1xf16> from vector<1x1x16x16x1xf16>
%subview_4 = memref.subview %subview[0, 0, 0, 0, 0] [%15, 60, 128, 16, 1] [1, 1, 1, 1, 1] : memref<?x60x128x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%23 = vector.shape_cast %22 : vector<16x16x1xf16> to vector<16x16xf16>
vector.transfer_write %23, %subview_4[%arg3, %arg4, %arg5, %c0] {in_bounds = [true, true]} : vector<16x16xf16>, memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
}
}
}
}
}
}
return
}
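The unit-dim dropping above replaces the 1x1x16x16x1 transfer_read/transfer_write pair with rank-reduced 16x16 transfers through rank-reducing subviews, but keeps the shape_cast / broadcast / transpose [0, 1, 3, 2, 4] / extract chain around the payload. A quick NumPy check (illustrative names only) that this chain is just a 16x16 transpose of the loaded tile:

import numpy as np
tile = np.arange(256, dtype=np.float16).reshape(16, 16)
chain = tile.reshape(16, 16, 1)[None, None, ...].transpose(0, 1, 3, 2, 4)[0, 0].reshape(16, 16)
assert np.array_equal(chain, tile.T)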
// -----// IR Dump After LLVMCPUVirtualVectorLowering (iree-llvmcpu-virtual-vector-lowering) //----- //
func.func @pack_dispatch_0_pack_f16() attributes {translation_info = #iree_codegen.translation_info<CPUDataTiling>} {
%cst = arith.constant 0.000000e+00 : f16
%c16 = arith.constant 16 : index
%c128 = arith.constant 128 : index
%c60 = arith.constant 60 : index
%c1 = arith.constant 1 : index
%c3200 = arith.constant 3200 : index
%c540 = arith.constant 540 : index
%c0 = arith.constant 0 : index
%c32_i64 = arith.constant 32 : i64
%0 = hal.interface.constant.load[0] : i32
%1 = hal.interface.constant.load[1] : i32
%2 = arith.extui %0 : i32 to i64
%3 = arith.extui %1 : i32 to i64
%4 = arith.shli %3, %c32_i64 : i64
%5 = arith.ori %2, %4 : i64
%6 = arith.index_castui %5 : i64 to index
%7 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<?x8640x3200xf16, #hal.descriptor_type<storage_buffer>>{%6}
memref.assume_alignment %7, 64 : memref<?x8640x3200xf16, #hal.descriptor_type<storage_buffer>>
%8 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>>{%6}
memref.assume_alignment %8, 64 : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>>
%workgroup_id_x = hal.interface.workgroup.id[0] : index
%workgroup_count_x = hal.interface.workgroup.count[0] : index
%workgroup_id_y = hal.interface.workgroup.id[1] : index
%workgroup_count_y = hal.interface.workgroup.count[1] : index
%workgroup_id_z = hal.interface.workgroup.id[2] : index
%workgroup_count_z = hal.interface.workgroup.count[2] : index
%9 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_z]
%10 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_z]
%11 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_id_y]
%12 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_count_y]
%13 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_id_x]
%14 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_count_x]
scf.for %arg0 = %9 to %6 step %10 {
%15 = affine.min affine_map<(d0)[s0] -> (-d0 + s0, 64)>(%arg0)[%6]
scf.for %arg1 = %11 to %c540 step %12 {
%16 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg1)
scf.for %arg2 = %13 to %c3200 step %14 {
%subview = memref.subview %8[%arg0, %arg1, %arg2, 0, 0] [%15, 60, 128, 16, 1] [1, 1, 1, 1, 1] : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>> to memref<?x60x128x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_0 = memref.subview %7[%arg0, %16, %arg2] [%15, 960, 128] [1, 1, 1] : memref<?x8640x3200xf16, #hal.descriptor_type<storage_buffer>> to memref<?x960x128xf16, strided<[27648000, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
scf.for %arg3 = %c0 to %15 step %c1 {
scf.for %arg4 = %c0 to %c60 step %c1 {
%17 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg4)
scf.for %arg5 = %c0 to %c128 step %c16 {
%subview_1 = memref.subview %subview_0[%arg3, %17, %arg5] [1, 16, 16] [1, 1, 1] : memref<?x960x128xf16, strided<[27648000, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<1x16x16xf16, strided<[27648000, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%expand_shape = memref.expand_shape %subview_1 [[0], [1, 2], [3, 4]] : memref<1x16x16xf16, strided<[27648000, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> into memref<1x1x16x16x1xf16, strided<[27648000, 51200, 3200, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_2 = memref.subview %expand_shape[0, 0, 0, 0, 0] [1, 1, 16, 16, 1] [1, 1, 1, 1, 1] : memref<1x1x16x16x1xf16, strided<[27648000, 51200, 3200, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<1x1x16x16xf16, strided<[27648000, 51200, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_3 = memref.subview %subview_2[0, 0, 0, 0] [1, 1, 16, 16] [1, 1, 1, 1] : memref<1x1x16x16xf16, strided<[27648000, 51200, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>
%18 = vector.transfer_read %subview_3[%c0, %c0], %cst {in_bounds = [true, true]} : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16x16xf16>
%19 = vector.shape_cast %18 : vector<16x16xf16> to vector<16x16x1xf16>
%20 = vector.broadcast %19 : vector<16x16x1xf16> to vector<1x1x16x16x1xf16>
%21 = vector.transpose %20, [0, 1, 3, 2, 4] : vector<1x1x16x16x1xf16> to vector<1x1x16x16x1xf16>
%22 = vector.extract %21[0, 0] : vector<16x16x1xf16> from vector<1x1x16x16x1xf16>
%subview_4 = memref.subview %subview[0, 0, 0, 0, 0] [%15, 60, 128, 16, 1] [1, 1, 1, 1, 1] : memref<?x60x128x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%23 = vector.shape_cast %22 : vector<16x16x1xf16> to vector<16x16xf16>
vector.transfer_write %23, %subview_4[%arg3, %arg4, %arg5, %c0] {in_bounds = [true, true]} : vector<16x16xf16>, memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
}
}
}
}
}
}
return
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
func.func @pack_dispatch_0_pack_f16() attributes {translation_info = #iree_codegen.translation_info<CPUDataTiling>} {
%cst = arith.constant 0.000000e+00 : f16
%c16 = arith.constant 16 : index
%c128 = arith.constant 128 : index
%c60 = arith.constant 60 : index
%c1 = arith.constant 1 : index
%c3200 = arith.constant 3200 : index
%c540 = arith.constant 540 : index
%c0 = arith.constant 0 : index
%c32_i64 = arith.constant 32 : i64
%0 = hal.interface.constant.load[0] : i32
%1 = hal.interface.constant.load[1] : i32
%2 = arith.extui %0 : i32 to i64
%3 = arith.extui %1 : i32 to i64
%4 = arith.shli %3, %c32_i64 : i64
%5 = arith.ori %2, %4 : i64
%6 = arith.index_castui %5 : i64 to index
%7 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<?x8640x3200xf16, #hal.descriptor_type<storage_buffer>>{%6}
memref.assume_alignment %7, 64 : memref<?x8640x3200xf16, #hal.descriptor_type<storage_buffer>>
%8 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>>{%6}
memref.assume_alignment %8, 64 : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>>
%workgroup_id_x = hal.interface.workgroup.id[0] : index
%workgroup_count_x = hal.interface.workgroup.count[0] : index
%workgroup_id_y = hal.interface.workgroup.id[1] : index
%workgroup_count_y = hal.interface.workgroup.count[1] : index
%workgroup_id_z = hal.interface.workgroup.id[2] : index
%workgroup_count_z = hal.interface.workgroup.count[2] : index
%9 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_z]
%10 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_z]
%11 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_id_y]
%12 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_count_y]
%13 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_id_x]
%14 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_count_x]
scf.for %arg0 = %9 to %6 step %10 {
%15 = affine.min affine_map<(d0)[s0] -> (-d0 + s0, 64)>(%arg0)[%6]
scf.for %arg1 = %11 to %c540 step %12 {
%16 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg1)
scf.for %arg2 = %13 to %c3200 step %14 {
%subview = memref.subview %8[%arg0, %arg1, %arg2, 0, 0] [%15, 60, 128, 16, 1] [1, 1, 1, 1, 1] : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>> to memref<?x60x128x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_0 = memref.subview %7[%arg0, %16, %arg2] [%15, 960, 128] [1, 1, 1] : memref<?x8640x3200xf16, #hal.descriptor_type<storage_buffer>> to memref<?x960x128xf16, strided<[27648000, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
scf.for %arg3 = %c0 to %15 step %c1 {
scf.for %arg4 = %c0 to %c60 step %c1 {
%17 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg4)
scf.for %arg5 = %c0 to %c128 step %c16 {
%subview_1 = memref.subview %subview_0[%arg3, %17, %arg5] [1, 16, 16] [1, 1, 1] : memref<?x960x128xf16, strided<[27648000, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<1x16x16xf16, strided<[27648000, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%expand_shape = memref.expand_shape %subview_1 [[0], [1, 2], [3, 4]] : memref<1x16x16xf16, strided<[27648000, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> into memref<1x1x16x16x1xf16, strided<[27648000, 51200, 3200, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_2 = memref.subview %expand_shape[0, 0, 0, 0, 0] [1, 1, 16, 16, 1] [1, 1, 1, 1, 1] : memref<1x1x16x16x1xf16, strided<[27648000, 51200, 3200, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<1x1x16x16xf16, strided<[27648000, 51200, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_3 = memref.subview %subview_2[0, 0, 0, 0] [1, 1, 16, 16] [1, 1, 1, 1] : memref<1x1x16x16xf16, strided<[27648000, 51200, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>
%18 = vector.transfer_read %subview_3[%c0, %c0], %cst {in_bounds = [true, true]} : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16x16xf16>
%19 = vector.shape_cast %18 : vector<16x16xf16> to vector<16x16x1xf16>
%20 = vector.broadcast %19 : vector<16x16x1xf16> to vector<1x1x16x16x1xf16>
%21 = vector.transpose %20, [0, 1, 3, 2, 4] : vector<1x1x16x16x1xf16> to vector<1x1x16x16x1xf16>
%22 = vector.extract %21[0, 0] : vector<16x16x1xf16> from vector<1x1x16x16x1xf16>
%subview_4 = memref.subview %subview[0, 0, 0, 0, 0] [%15, 60, 128, 16, 1] [1, 1, 1, 1, 1] : memref<?x60x128x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%23 = vector.shape_cast %22 : vector<16x16x1xf16> to vector<16x16xf16>
vector.transfer_write %23, %subview_4[%arg3, %arg4, %arg5, %c0] {in_bounds = [true, true]} : vector<16x16xf16>, memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
}
}
}
}
}
}
return
}
// -----// IR Dump After LLVMCPUVectorTransferLowering (iree-llvmcpu-vector-transfer-lowering) //----- //
func.func @pack_dispatch_0_pack_f16() attributes {translation_info = #iree_codegen.translation_info<CPUDataTiling>} {
%c15 = arith.constant 15 : index
%c14 = arith.constant 14 : index
%c13 = arith.constant 13 : index
%c12 = arith.constant 12 : index
%c11 = arith.constant 11 : index
%c10 = arith.constant 10 : index
%c9 = arith.constant 9 : index
%c8 = arith.constant 8 : index
%c7 = arith.constant 7 : index
%c6 = arith.constant 6 : index
%c5 = arith.constant 5 : index
%c4 = arith.constant 4 : index
%c3 = arith.constant 3 : index
%c2 = arith.constant 2 : index
%cst = arith.constant dense<0.000000e+00> : vector<16x16xf16>
%c16 = arith.constant 16 : index
%c128 = arith.constant 128 : index
%c60 = arith.constant 60 : index
%c1 = arith.constant 1 : index
%c3200 = arith.constant 3200 : index
%c540 = arith.constant 540 : index
%c0 = arith.constant 0 : index
%c32_i64 = arith.constant 32 : i64
%0 = hal.interface.constant.load[0] : i32
%1 = hal.interface.constant.load[1] : i32
%2 = arith.extui %0 : i32 to i64
%3 = arith.extui %1 : i32 to i64
%4 = arith.shli %3, %c32_i64 : i64
%5 = arith.ori %2, %4 : i64
%6 = arith.index_castui %5 : i64 to index
%7 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<?x8640x3200xf16, #hal.descriptor_type<storage_buffer>>{%6}
memref.assume_alignment %7, 64 : memref<?x8640x3200xf16, #hal.descriptor_type<storage_buffer>>
%8 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>>{%6}
memref.assume_alignment %8, 64 : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>>
%workgroup_id_x = hal.interface.workgroup.id[0] : index
%workgroup_count_x = hal.interface.workgroup.count[0] : index
%workgroup_id_y = hal.interface.workgroup.id[1] : index
%workgroup_count_y = hal.interface.workgroup.count[1] : index
%workgroup_id_z = hal.interface.workgroup.id[2] : index
%workgroup_count_z = hal.interface.workgroup.count[2] : index
%9 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_z]
%10 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_z]
%11 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_id_y]
%12 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_count_y]
%13 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_id_x]
%14 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_count_x]
scf.for %arg0 = %9 to %6 step %10 {
%15 = affine.min affine_map<(d0)[s0] -> (-d0 + s0, 64)>(%arg0)[%6]
scf.for %arg1 = %11 to %c540 step %12 {
%16 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg1)
scf.for %arg2 = %13 to %c3200 step %14 {
%subview = memref.subview %8[%arg0, %arg1, %arg2, 0, 0] [%15, 60, 128, 16, 1] [1, 1, 1, 1, 1] : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>> to memref<?x60x128x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_0 = memref.subview %7[%arg0, %16, %arg2] [%15, 960, 128] [1, 1, 1] : memref<?x8640x3200xf16, #hal.descriptor_type<storage_buffer>> to memref<?x960x128xf16, strided<[27648000, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
scf.for %arg3 = %c0 to %15 step %c1 {
scf.for %arg4 = %c0 to %c60 step %c1 {
%17 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg4)
scf.for %arg5 = %c0 to %c128 step %c16 {
%subview_1 = memref.subview %subview_0[%arg3, %17, %arg5] [1, 16, 16] [1, 1, 1] : memref<?x960x128xf16, strided<[27648000, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<1x16x16xf16, strided<[27648000, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%expand_shape = memref.expand_shape %subview_1 [[0], [1, 2], [3, 4]] : memref<1x16x16xf16, strided<[27648000, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> into memref<1x1x16x16x1xf16, strided<[27648000, 51200, 3200, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_2 = memref.subview %expand_shape[0, 0, 0, 0, 0] [1, 1, 16, 16, 1] [1, 1, 1, 1, 1] : memref<1x1x16x16x1xf16, strided<[27648000, 51200, 3200, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<1x1x16x16xf16, strided<[27648000, 51200, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_3 = memref.subview %subview_2[0, 0, 0, 0] [1, 1, 16, 16] [1, 1, 1, 1] : memref<1x1x16x16xf16, strided<[27648000, 51200, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>
%18 = vector.load %subview_3[%c0, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%19 = vector.insert %18, %cst [0] : vector<16xf16> into vector<16x16xf16>
%20 = vector.load %subview_3[%c1, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%21 = vector.insert %20, %19 [1] : vector<16xf16> into vector<16x16xf16>
%22 = vector.load %subview_3[%c2, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%23 = vector.insert %22, %21 [2] : vector<16xf16> into vector<16x16xf16>
%24 = vector.load %subview_3[%c3, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%25 = vector.insert %24, %23 [3] : vector<16xf16> into vector<16x16xf16>
%26 = vector.load %subview_3[%c4, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%27 = vector.insert %26, %25 [4] : vector<16xf16> into vector<16x16xf16>
%28 = vector.load %subview_3[%c5, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%29 = vector.insert %28, %27 [5] : vector<16xf16> into vector<16x16xf16>
%30 = vector.load %subview_3[%c6, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%31 = vector.insert %30, %29 [6] : vector<16xf16> into vector<16x16xf16>
%32 = vector.load %subview_3[%c7, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%33 = vector.insert %32, %31 [7] : vector<16xf16> into vector<16x16xf16>
%34 = vector.load %subview_3[%c8, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%35 = vector.insert %34, %33 [8] : vector<16xf16> into vector<16x16xf16>
%36 = vector.load %subview_3[%c9, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%37 = vector.insert %36, %35 [9] : vector<16xf16> into vector<16x16xf16>
%38 = vector.load %subview_3[%c10, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%39 = vector.insert %38, %37 [10] : vector<16xf16> into vector<16x16xf16>
%40 = vector.load %subview_3[%c11, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%41 = vector.insert %40, %39 [11] : vector<16xf16> into vector<16x16xf16>
%42 = vector.load %subview_3[%c12, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%43 = vector.insert %42, %41 [12] : vector<16xf16> into vector<16x16xf16>
%44 = vector.load %subview_3[%c13, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%45 = vector.insert %44, %43 [13] : vector<16xf16> into vector<16x16xf16>
%46 = vector.load %subview_3[%c14, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%47 = vector.insert %46, %45 [14] : vector<16xf16> into vector<16x16xf16>
%48 = vector.load %subview_3[%c15, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%49 = vector.insert %48, %47 [15] : vector<16xf16> into vector<16x16xf16>
%50 = vector.shape_cast %49 : vector<16x16xf16> to vector<16x16x1xf16>
%51 = vector.broadcast %50 : vector<16x16x1xf16> to vector<1x1x16x16x1xf16>
%52 = vector.transpose %51, [0, 1, 3, 2, 4] : vector<1x1x16x16x1xf16> to vector<1x1x16x16x1xf16>
%53 = vector.extract %52[0, 0] : vector<16x16x1xf16> from vector<1x1x16x16x1xf16>
%subview_4 = memref.subview %subview[0, 0, 0, 0, 0] [%15, 60, 128, 16, 1] [1, 1, 1, 1, 1] : memref<?x60x128x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%54 = vector.shape_cast %53 : vector<16x16x1xf16> to vector<16x16xf16>
%55 = vector.extract %54[0] : vector<16xf16> from vector<16x16xf16>
vector.store %55, %subview_4[%arg3, %arg4, %arg5, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%56 = affine.apply affine_map<(d0) -> (d0 + 1)>(%arg5)
%57 = vector.extract %54[1] : vector<16xf16> from vector<16x16xf16>
vector.store %57, %subview_4[%arg3, %arg4, %56, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%58 = affine.apply affine_map<(d0) -> (d0 + 2)>(%arg5)
%59 = vector.extract %54[2] : vector<16xf16> from vector<16x16xf16>
vector.store %59, %subview_4[%arg3, %arg4, %58, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%60 = affine.apply affine_map<(d0) -> (d0 + 3)>(%arg5)
%61 = vector.extract %54[3] : vector<16xf16> from vector<16x16xf16>
vector.store %61, %subview_4[%arg3, %arg4, %60, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%62 = affine.apply affine_map<(d0) -> (d0 + 4)>(%arg5)
%63 = vector.extract %54[4] : vector<16xf16> from vector<16x16xf16>
vector.store %63, %subview_4[%arg3, %arg4, %62, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%64 = affine.apply affine_map<(d0) -> (d0 + 5)>(%arg5)
%65 = vector.extract %54[5] : vector<16xf16> from vector<16x16xf16>
vector.store %65, %subview_4[%arg3, %arg4, %64, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%66 = affine.apply affine_map<(d0) -> (d0 + 6)>(%arg5)
%67 = vector.extract %54[6] : vector<16xf16> from vector<16x16xf16>
vector.store %67, %subview_4[%arg3, %arg4, %66, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%68 = affine.apply affine_map<(d0) -> (d0 + 7)>(%arg5)
%69 = vector.extract %54[7] : vector<16xf16> from vector<16x16xf16>
vector.store %69, %subview_4[%arg3, %arg4, %68, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%70 = affine.apply affine_map<(d0) -> (d0 + 8)>(%arg5)
%71 = vector.extract %54[8] : vector<16xf16> from vector<16x16xf16>
vector.store %71, %subview_4[%arg3, %arg4, %70, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%72 = affine.apply affine_map<(d0) -> (d0 + 9)>(%arg5)
%73 = vector.extract %54[9] : vector<16xf16> from vector<16x16xf16>
vector.store %73, %subview_4[%arg3, %arg4, %72, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%74 = affine.apply affine_map<(d0) -> (d0 + 10)>(%arg5)
%75 = vector.extract %54[10] : vector<16xf16> from vector<16x16xf16>
vector.store %75, %subview_4[%arg3, %arg4, %74, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%76 = affine.apply affine_map<(d0) -> (d0 + 11)>(%arg5)
%77 = vector.extract %54[11] : vector<16xf16> from vector<16x16xf16>
vector.store %77, %subview_4[%arg3, %arg4, %76, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%78 = affine.apply affine_map<(d0) -> (d0 + 12)>(%arg5)
%79 = vector.extract %54[12] : vector<16xf16> from vector<16x16xf16>
vector.store %79, %subview_4[%arg3, %arg4, %78, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%80 = affine.apply affine_map<(d0) -> (d0 + 13)>(%arg5)
%81 = vector.extract %54[13] : vector<16xf16> from vector<16x16xf16>
vector.store %81, %subview_4[%arg3, %arg4, %80, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%82 = affine.apply affine_map<(d0) -> (d0 + 14)>(%arg5)
%83 = vector.extract %54[14] : vector<16xf16> from vector<16x16xf16>
vector.store %83, %subview_4[%arg3, %arg4, %82, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%84 = affine.apply affine_map<(d0) -> (d0 + 15)>(%arg5)
%85 = vector.extract %54[15] : vector<16xf16> from vector<16x16xf16>
vector.store %85, %subview_4[%arg3, %arg4, %84, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
}
}
}
}
}
}
return
}
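The transfer lowering above unrolls the 16x16 transfer_read into sixteen vector.load / vector.insert pairs and the transfer_write into sixteen vector.extract / vector.store pairs, folding the row offset into the third memref index via affine.apply (d0 + 1) through (d0 + 15). A hedged NumPy sketch of the resulting row-at-a-time copy; the buffer shapes and the arg5 value are illustrative stand-ins for one loop iteration, ignoring the leading %arg3/%arg4 indices:

import numpy as np
src = np.arange(256, dtype=np.float16).reshape(16, 16)   # tile read row by row
dst = np.zeros((128, 16), dtype=np.float16)              # stands in for the 128x16 tail of %subview_4
arg5 = 16                                                 # one value of the step-16 loop
acc = np.zeros((16, 16), dtype=np.float16)
for r in range(16):            # unrolled vector.load + vector.insert [r]
    acc[r] = src[r]
rows_out = acc.T               # the shape_cast/broadcast/transpose/extract chain above
for r in range(16):            # unrolled vector.extract [r] + vector.store at arg5 + r
    dst[arg5 + r] = rows_out[r]
assert np.array_equal(dst[arg5:arg5 + 16], src.T)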
// -----// IR Dump After LLVMCPUVectorTransposeLowering (iree-llvmcpu-vector-transpose-lowering) //----- //
func.func @pack_dispatch_0_pack_f16() attributes {translation_info = #iree_codegen.translation_info<CPUDataTiling>} {
%c15 = arith.constant 15 : index
%c14 = arith.constant 14 : index
%c13 = arith.constant 13 : index
%c12 = arith.constant 12 : index
%c11 = arith.constant 11 : index
%c10 = arith.constant 10 : index
%c9 = arith.constant 9 : index
%c8 = arith.constant 8 : index
%c7 = arith.constant 7 : index
%c6 = arith.constant 6 : index
%c5 = arith.constant 5 : index
%c4 = arith.constant 4 : index
%c3 = arith.constant 3 : index
%c2 = arith.constant 2 : index
%cst = arith.constant dense<0.000000e+00> : vector<16x16xf16>
%c16 = arith.constant 16 : index
%c128 = arith.constant 128 : index
%c60 = arith.constant 60 : index
%c1 = arith.constant 1 : index
%c3200 = arith.constant 3200 : index
%c540 = arith.constant 540 : index
%c0 = arith.constant 0 : index
%c32_i64 = arith.constant 32 : i64
%0 = hal.interface.constant.load[0] : i32
%1 = hal.interface.constant.load[1] : i32
%2 = arith.extui %0 : i32 to i64
%3 = arith.extui %1 : i32 to i64
%4 = arith.shli %3, %c32_i64 : i64
%5 = arith.ori %2, %4 : i64
%6 = arith.index_castui %5 : i64 to index
%7 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<?x8640x3200xf16, #hal.descriptor_type<storage_buffer>>{%6}
memref.assume_alignment %7, 64 : memref<?x8640x3200xf16, #hal.descriptor_type<storage_buffer>>
%8 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>>{%6}
memref.assume_alignment %8, 64 : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>>
%workgroup_id_x = hal.interface.workgroup.id[0] : index
%workgroup_count_x = hal.interface.workgroup.count[0] : index
%workgroup_id_y = hal.interface.workgroup.id[1] : index
%workgroup_count_y = hal.interface.workgroup.count[1] : index
%workgroup_id_z = hal.interface.workgroup.id[2] : index
%workgroup_count_z = hal.interface.workgroup.count[2] : index
%9 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_z]
%10 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_z]
%11 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_id_y]
%12 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_count_y]
%13 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_id_x]
%14 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_count_x]
scf.for %arg0 = %9 to %6 step %10 {
%15 = affine.min affine_map<(d0)[s0] -> (-d0 + s0, 64)>(%arg0)[%6]
scf.for %arg1 = %11 to %c540 step %12 {
%16 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg1)
scf.for %arg2 = %13 to %c3200 step %14 {
%subview = memref.subview %8[%arg0, %arg1, %arg2, 0, 0] [%15, 60, 128, 16, 1] [1, 1, 1, 1, 1] : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>> to memref<?x60x128x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_0 = memref.subview %7[%arg0, %16, %arg2] [%15, 960, 128] [1, 1, 1] : memref<?x8640x3200xf16, #hal.descriptor_type<storage_buffer>> to memref<?x960x128xf16, strided<[27648000, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
scf.for %arg3 = %c0 to %15 step %c1 {
scf.for %arg4 = %c0 to %c60 step %c1 {
%17 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg4)
scf.for %arg5 = %c0 to %c128 step %c16 {
%subview_1 = memref.subview %subview_0[%arg3, %17, %arg5] [1, 16, 16] [1, 1, 1] : memref<?x960x128xf16, strided<[27648000, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<1x16x16xf16, strided<[27648000, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%expand_shape = memref.expand_shape %subview_1 [[0], [1, 2], [3, 4]] : memref<1x16x16xf16, strided<[27648000, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> into memref<1x1x16x16x1xf16, strided<[27648000, 51200, 3200, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_2 = memref.subview %expand_shape[0, 0, 0, 0, 0] [1, 1, 16, 16, 1] [1, 1, 1, 1, 1] : memref<1x1x16x16x1xf16, strided<[27648000, 51200, 3200, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<1x1x16x16xf16, strided<[27648000, 51200, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_3 = memref.subview %subview_2[0, 0, 0, 0] [1, 1, 16, 16] [1, 1, 1, 1] : memref<1x1x16x16xf16, strided<[27648000, 51200, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>
%18 = vector.load %subview_3[%c0, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%19 = vector.insert %18, %cst [0] : vector<16xf16> into vector<16x16xf16>
%20 = vector.load %subview_3[%c1, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%21 = vector.insert %20, %19 [1] : vector<16xf16> into vector<16x16xf16>
%22 = vector.load %subview_3[%c2, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%23 = vector.insert %22, %21 [2] : vector<16xf16> into vector<16x16xf16>
%24 = vector.load %subview_3[%c3, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%25 = vector.insert %24, %23 [3] : vector<16xf16> into vector<16x16xf16>
%26 = vector.load %subview_3[%c4, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%27 = vector.insert %26, %25 [4] : vector<16xf16> into vector<16x16xf16>
%28 = vector.load %subview_3[%c5, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%29 = vector.insert %28, %27 [5] : vector<16xf16> into vector<16x16xf16>
%30 = vector.load %subview_3[%c6, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%31 = vector.insert %30, %29 [6] : vector<16xf16> into vector<16x16xf16>
%32 = vector.load %subview_3[%c7, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%33 = vector.insert %32, %31 [7] : vector<16xf16> into vector<16x16xf16>
%34 = vector.load %subview_3[%c8, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%35 = vector.insert %34, %33 [8] : vector<16xf16> into vector<16x16xf16>
%36 = vector.load %subview_3[%c9, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%37 = vector.insert %36, %35 [9] : vector<16xf16> into vector<16x16xf16>
%38 = vector.load %subview_3[%c10, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%39 = vector.insert %38, %37 [10] : vector<16xf16> into vector<16x16xf16>
%40 = vector.load %subview_3[%c11, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%41 = vector.insert %40, %39 [11] : vector<16xf16> into vector<16x16xf16>
%42 = vector.load %subview_3[%c12, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%43 = vector.insert %42, %41 [12] : vector<16xf16> into vector<16x16xf16>
%44 = vector.load %subview_3[%c13, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%45 = vector.insert %44, %43 [13] : vector<16xf16> into vector<16x16xf16>
%46 = vector.load %subview_3[%c14, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%47 = vector.insert %46, %45 [14] : vector<16xf16> into vector<16x16xf16>
%48 = vector.load %subview_3[%c15, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%49 = vector.insert %48, %47 [15] : vector<16xf16> into vector<16x16xf16>
%50 = vector.shape_cast %49 : vector<16x16xf16> to vector<16x16x1xf16>
%51 = vector.broadcast %50 : vector<16x16x1xf16> to vector<1x1x16x16x1xf16>
%52 = vector.shape_cast %51 : vector<1x1x16x16x1xf16> to vector<16x16xf16>
%53 = vector.extract %52[0] : vector<16xf16> from vector<16x16xf16>
%54 = vector.extract %52[1] : vector<16xf16> from vector<16x16xf16>
%55 = vector.extract %52[2] : vector<16xf16> from vector<16x16xf16>
%56 = vector.extract %52[3] : vector<16xf16> from vector<16x16xf16>
%57 = vector.extract %52[4] : vector<16xf16> from vector<16x16xf16>
%58 = vector.extract %52[5] : vector<16xf16> from vector<16x16xf16>
%59 = vector.extract %52[6] : vector<16xf16> from vector<16x16xf16>
%60 = vector.extract %52[7] : vector<16xf16> from vector<16x16xf16>
%61 = vector.extract %52[8] : vector<16xf16> from vector<16x16xf16>
%62 = vector.extract %52[9] : vector<16xf16> from vector<16x16xf16>
%63 = vector.extract %52[10] : vector<16xf16> from vector<16x16xf16>
%64 = vector.extract %52[11] : vector<16xf16> from vector<16x16xf16>
%65 = vector.extract %52[12] : vector<16xf16> from vector<16x16xf16>
%66 = vector.extract %52[13] : vector<16xf16> from vector<16x16xf16>
%67 = vector.extract %52[14] : vector<16xf16> from vector<16x16xf16>
%68 = vector.extract %52[15] : vector<16xf16> from vector<16x16xf16>
%69 = vector.shuffle %53, %54 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%70 = vector.shuffle %53, %54 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%71 = vector.shuffle %55, %56 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%72 = vector.shuffle %55, %56 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%73 = vector.shuffle %57, %58 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%74 = vector.shuffle %57, %58 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%75 = vector.shuffle %59, %60 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%76 = vector.shuffle %59, %60 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%77 = vector.shuffle %61, %62 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%78 = vector.shuffle %61, %62 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%79 = vector.shuffle %63, %64 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%80 = vector.shuffle %63, %64 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%81 = vector.shuffle %65, %66 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%82 = vector.shuffle %65, %66 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%83 = vector.shuffle %67, %68 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%84 = vector.shuffle %67, %68 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%85 = vector.shuffle %69, %71 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%86 = vector.shuffle %69, %71 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%87 = vector.shuffle %70, %72 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%88 = vector.shuffle %70, %72 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%89 = vector.shuffle %73, %75 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%90 = vector.shuffle %73, %75 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%91 = vector.shuffle %74, %76 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%92 = vector.shuffle %74, %76 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%93 = vector.shuffle %77, %79 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%94 = vector.shuffle %77, %79 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%95 = vector.shuffle %78, %80 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%96 = vector.shuffle %78, %80 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%97 = vector.shuffle %81, %83 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%98 = vector.shuffle %81, %83 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%99 = vector.shuffle %82, %84 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%100 = vector.shuffle %82, %84 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%101 = vector.shuffle %85, %89 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%102 = vector.shuffle %86, %90 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%103 = vector.shuffle %87, %91 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%104 = vector.shuffle %88, %92 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%105 = vector.shuffle %85, %89 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%106 = vector.shuffle %86, %90 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%107 = vector.shuffle %87, %91 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%108 = vector.shuffle %88, %92 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%109 = vector.shuffle %93, %97 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%110 = vector.shuffle %94, %98 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%111 = vector.shuffle %95, %99 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%112 = vector.shuffle %96, %100 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%113 = vector.shuffle %93, %97 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%114 = vector.shuffle %94, %98 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%115 = vector.shuffle %95, %99 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%116 = vector.shuffle %96, %100 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%117 = vector.shuffle %101, %109 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%118 = vector.shuffle %102, %110 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%119 = vector.shuffle %103, %111 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%120 = vector.shuffle %104, %112 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%121 = vector.shuffle %105, %113 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%122 = vector.shuffle %106, %114 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%123 = vector.shuffle %107, %115 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%124 = vector.shuffle %108, %116 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%125 = vector.shuffle %101, %109 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%126 = vector.shuffle %102, %110 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%127 = vector.shuffle %103, %111 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%128 = vector.shuffle %104, %112 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%129 = vector.shuffle %105, %113 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%130 = vector.shuffle %106, %114 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%131 = vector.shuffle %107, %115 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%132 = vector.shuffle %108, %116 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%133 = vector.insert %117, %cst [0] : vector<16xf16> into vector<16x16xf16>
%134 = vector.insert %118, %133 [1] : vector<16xf16> into vector<16x16xf16>
%135 = vector.insert %119, %134 [2] : vector<16xf16> into vector<16x16xf16>
%136 = vector.insert %120, %135 [3] : vector<16xf16> into vector<16x16xf16>
%137 = vector.insert %121, %136 [4] : vector<16xf16> into vector<16x16xf16>
%138 = vector.insert %122, %137 [5] : vector<16xf16> into vector<16x16xf16>
%139 = vector.insert %123, %138 [6] : vector<16xf16> into vector<16x16xf16>
%140 = vector.insert %124, %139 [7] : vector<16xf16> into vector<16x16xf16>
%141 = vector.insert %125, %140 [8] : vector<16xf16> into vector<16x16xf16>
%142 = vector.insert %126, %141 [9] : vector<16xf16> into vector<16x16xf16>
%143 = vector.insert %127, %142 [10] : vector<16xf16> into vector<16x16xf16>
%144 = vector.insert %128, %143 [11] : vector<16xf16> into vector<16x16xf16>
%145 = vector.insert %129, %144 [12] : vector<16xf16> into vector<16x16xf16>
%146 = vector.insert %130, %145 [13] : vector<16xf16> into vector<16x16xf16>
%147 = vector.insert %131, %146 [14] : vector<16xf16> into vector<16x16xf16>
%148 = vector.insert %132, %147 [15] : vector<16xf16> into vector<16x16xf16>
%149 = vector.shape_cast %148 : vector<16x16xf16> to vector<1x1x16x16x1xf16>
%150 = vector.extract %149[0, 0] : vector<16x16x1xf16> from vector<1x1x16x16x1xf16>
%subview_4 = memref.subview %subview[0, 0, 0, 0, 0] [%15, 60, 128, 16, 1] [1, 1, 1, 1, 1] : memref<?x60x128x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%151 = vector.shape_cast %150 : vector<16x16x1xf16> to vector<16x16xf16>
%152 = vector.extract %151[0] : vector<16xf16> from vector<16x16xf16>
vector.store %152, %subview_4[%arg3, %arg4, %arg5, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%153 = affine.apply affine_map<(d0) -> (d0 + 1)>(%arg5)
%154 = vector.extract %151[1] : vector<16xf16> from vector<16x16xf16>
vector.store %154, %subview_4[%arg3, %arg4, %153, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%155 = affine.apply affine_map<(d0) -> (d0 + 2)>(%arg5)
%156 = vector.extract %151[2] : vector<16xf16> from vector<16x16xf16>
vector.store %156, %subview_4[%arg3, %arg4, %155, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%157 = affine.apply affine_map<(d0) -> (d0 + 3)>(%arg5)
%158 = vector.extract %151[3] : vector<16xf16> from vector<16x16xf16>
vector.store %158, %subview_4[%arg3, %arg4, %157, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%159 = affine.apply affine_map<(d0) -> (d0 + 4)>(%arg5)
%160 = vector.extract %151[4] : vector<16xf16> from vector<16x16xf16>
vector.store %160, %subview_4[%arg3, %arg4, %159, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%161 = affine.apply affine_map<(d0) -> (d0 + 5)>(%arg5)
%162 = vector.extract %151[5] : vector<16xf16> from vector<16x16xf16>
vector.store %162, %subview_4[%arg3, %arg4, %161, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%163 = affine.apply affine_map<(d0) -> (d0 + 6)>(%arg5)
%164 = vector.extract %151[6] : vector<16xf16> from vector<16x16xf16>
vector.store %164, %subview_4[%arg3, %arg4, %163, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%165 = affine.apply affine_map<(d0) -> (d0 + 7)>(%arg5)
%166 = vector.extract %151[7] : vector<16xf16> from vector<16x16xf16>
vector.store %166, %subview_4[%arg3, %arg4, %165, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%167 = affine.apply affine_map<(d0) -> (d0 + 8)>(%arg5)
%168 = vector.extract %151[8] : vector<16xf16> from vector<16x16xf16>
vector.store %168, %subview_4[%arg3, %arg4, %167, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%169 = affine.apply affine_map<(d0) -> (d0 + 9)>(%arg5)
%170 = vector.extract %151[9] : vector<16xf16> from vector<16x16xf16>
vector.store %170, %subview_4[%arg3, %arg4, %169, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%171 = affine.apply affine_map<(d0) -> (d0 + 10)>(%arg5)
%172 = vector.extract %151[10] : vector<16xf16> from vector<16x16xf16>
vector.store %172, %subview_4[%arg3, %arg4, %171, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%173 = affine.apply affine_map<(d0) -> (d0 + 11)>(%arg5)
%174 = vector.extract %151[11] : vector<16xf16> from vector<16x16xf16>
vector.store %174, %subview_4[%arg3, %arg4, %173, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%175 = affine.apply affine_map<(d0) -> (d0 + 12)>(%arg5)
%176 = vector.extract %151[12] : vector<16xf16> from vector<16x16xf16>
vector.store %176, %subview_4[%arg3, %arg4, %175, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%177 = affine.apply affine_map<(d0) -> (d0 + 13)>(%arg5)
%178 = vector.extract %151[13] : vector<16xf16> from vector<16x16xf16>
vector.store %178, %subview_4[%arg3, %arg4, %177, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%179 = affine.apply affine_map<(d0) -> (d0 + 14)>(%arg5)
%180 = vector.extract %151[14] : vector<16xf16> from vector<16x16xf16>
vector.store %180, %subview_4[%arg3, %arg4, %179, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%181 = affine.apply affine_map<(d0) -> (d0 + 15)>(%arg5)
%182 = vector.extract %151[15] : vector<16xf16> from vector<16x16xf16>
vector.store %182, %subview_4[%arg3, %arg4, %181, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
}
}
}
}
}
}
return
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
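// (annotation, not compiler output) Compared with the preceding dump, canonicalization has folded away the per-row vector.insert/vector.extract chains and the vector.shape_cast/vector.broadcast round-trips: the sixteen vector.load results now feed the shuffle-based 16x16 transpose directly, and the shuffled rows are stored without any repacking.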
func.func @pack_dispatch_0_pack_f16() attributes {translation_info = #iree_codegen.translation_info<CPUDataTiling>} {
%c15 = arith.constant 15 : index
%c14 = arith.constant 14 : index
%c13 = arith.constant 13 : index
%c12 = arith.constant 12 : index
%c11 = arith.constant 11 : index
%c10 = arith.constant 10 : index
%c9 = arith.constant 9 : index
%c8 = arith.constant 8 : index
%c7 = arith.constant 7 : index
%c6 = arith.constant 6 : index
%c5 = arith.constant 5 : index
%c4 = arith.constant 4 : index
%c3 = arith.constant 3 : index
%c2 = arith.constant 2 : index
%c16 = arith.constant 16 : index
%c128 = arith.constant 128 : index
%c60 = arith.constant 60 : index
%c1 = arith.constant 1 : index
%c3200 = arith.constant 3200 : index
%c540 = arith.constant 540 : index
%c0 = arith.constant 0 : index
%c32_i64 = arith.constant 32 : i64
%0 = hal.interface.constant.load[0] : i32
%1 = hal.interface.constant.load[1] : i32
%2 = arith.extui %0 : i32 to i64
%3 = arith.extui %1 : i32 to i64
%4 = arith.shli %3, %c32_i64 : i64
%5 = arith.ori %2, %4 : i64
%6 = arith.index_castui %5 : i64 to index
%7 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<?x8640x3200xf16, #hal.descriptor_type<storage_buffer>>{%6}
memref.assume_alignment %7, 64 : memref<?x8640x3200xf16, #hal.descriptor_type<storage_buffer>>
%8 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>>{%6}
memref.assume_alignment %8, 64 : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>>
%workgroup_id_x = hal.interface.workgroup.id[0] : index
%workgroup_count_x = hal.interface.workgroup.count[0] : index
%workgroup_id_y = hal.interface.workgroup.id[1] : index
%workgroup_count_y = hal.interface.workgroup.count[1] : index
%workgroup_id_z = hal.interface.workgroup.id[2] : index
%workgroup_count_z = hal.interface.workgroup.count[2] : index
%9 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_z]
%10 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_z]
%11 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_id_y]
%12 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_count_y]
%13 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_id_x]
%14 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_count_x]
scf.for %arg0 = %9 to %6 step %10 {
%15 = affine.min affine_map<(d0)[s0] -> (-d0 + s0, 64)>(%arg0)[%6]
scf.for %arg1 = %11 to %c540 step %12 {
%16 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg1)
scf.for %arg2 = %13 to %c3200 step %14 {
%subview = memref.subview %8[%arg0, %arg1, %arg2, 0, 0] [%15, 60, 128, 16, 1] [1, 1, 1, 1, 1] : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>> to memref<?x60x128x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_0 = memref.subview %7[%arg0, %16, %arg2] [%15, 960, 128] [1, 1, 1] : memref<?x8640x3200xf16, #hal.descriptor_type<storage_buffer>> to memref<?x960x128xf16, strided<[27648000, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
scf.for %arg3 = %c0 to %15 step %c1 {
scf.for %arg4 = %c0 to %c60 step %c1 {
%17 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg4)
scf.for %arg5 = %c0 to %c128 step %c16 {
%subview_1 = memref.subview %subview_0[%arg3, %17, %arg5] [1, 16, 16] [1, 1, 1] : memref<?x960x128xf16, strided<[27648000, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<1x16x16xf16, strided<[27648000, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%expand_shape = memref.expand_shape %subview_1 [[0], [1, 2], [3, 4]] : memref<1x16x16xf16, strided<[27648000, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> into memref<1x1x16x16x1xf16, strided<[27648000, 51200, 3200, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_2 = memref.subview %expand_shape[0, 0, 0, 0, 0] [1, 1, 16, 16, 1] [1, 1, 1, 1, 1] : memref<1x1x16x16x1xf16, strided<[27648000, 51200, 3200, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<1x1x16x16xf16, strided<[27648000, 51200, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_3 = memref.subview %subview_2[0, 0, 0, 0] [1, 1, 16, 16] [1, 1, 1, 1] : memref<1x1x16x16xf16, strided<[27648000, 51200, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>
%18 = vector.load %subview_3[%c0, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%19 = vector.load %subview_3[%c1, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%20 = vector.load %subview_3[%c2, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%21 = vector.load %subview_3[%c3, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%22 = vector.load %subview_3[%c4, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%23 = vector.load %subview_3[%c5, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%24 = vector.load %subview_3[%c6, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%25 = vector.load %subview_3[%c7, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%26 = vector.load %subview_3[%c8, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%27 = vector.load %subview_3[%c9, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%28 = vector.load %subview_3[%c10, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%29 = vector.load %subview_3[%c11, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%30 = vector.load %subview_3[%c12, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%31 = vector.load %subview_3[%c13, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%32 = vector.load %subview_3[%c14, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%33 = vector.load %subview_3[%c15, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%34 = vector.shuffle %18, %19 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%35 = vector.shuffle %18, %19 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%36 = vector.shuffle %20, %21 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%37 = vector.shuffle %20, %21 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%38 = vector.shuffle %22, %23 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%39 = vector.shuffle %22, %23 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%40 = vector.shuffle %24, %25 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%41 = vector.shuffle %24, %25 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%42 = vector.shuffle %26, %27 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%43 = vector.shuffle %26, %27 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%44 = vector.shuffle %28, %29 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%45 = vector.shuffle %28, %29 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%46 = vector.shuffle %30, %31 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%47 = vector.shuffle %30, %31 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%48 = vector.shuffle %32, %33 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%49 = vector.shuffle %32, %33 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%50 = vector.shuffle %34, %36 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%51 = vector.shuffle %34, %36 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%52 = vector.shuffle %35, %37 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%53 = vector.shuffle %35, %37 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%54 = vector.shuffle %38, %40 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%55 = vector.shuffle %38, %40 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%56 = vector.shuffle %39, %41 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%57 = vector.shuffle %39, %41 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%58 = vector.shuffle %42, %44 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%59 = vector.shuffle %42, %44 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%60 = vector.shuffle %43, %45 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%61 = vector.shuffle %43, %45 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%62 = vector.shuffle %46, %48 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%63 = vector.shuffle %46, %48 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%64 = vector.shuffle %47, %49 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%65 = vector.shuffle %47, %49 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%66 = vector.shuffle %50, %54 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%67 = vector.shuffle %51, %55 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%68 = vector.shuffle %52, %56 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%69 = vector.shuffle %53, %57 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%70 = vector.shuffle %50, %54 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%71 = vector.shuffle %51, %55 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%72 = vector.shuffle %52, %56 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%73 = vector.shuffle %53, %57 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%74 = vector.shuffle %58, %62 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%75 = vector.shuffle %59, %63 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%76 = vector.shuffle %60, %64 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%77 = vector.shuffle %61, %65 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%78 = vector.shuffle %58, %62 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%79 = vector.shuffle %59, %63 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%80 = vector.shuffle %60, %64 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%81 = vector.shuffle %61, %65 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%82 = vector.shuffle %66, %74 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%83 = vector.shuffle %67, %75 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%84 = vector.shuffle %68, %76 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%85 = vector.shuffle %69, %77 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%86 = vector.shuffle %70, %78 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%87 = vector.shuffle %71, %79 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%88 = vector.shuffle %72, %80 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%89 = vector.shuffle %73, %81 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%90 = vector.shuffle %66, %74 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%91 = vector.shuffle %67, %75 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%92 = vector.shuffle %68, %76 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%93 = vector.shuffle %69, %77 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%94 = vector.shuffle %70, %78 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%95 = vector.shuffle %71, %79 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%96 = vector.shuffle %72, %80 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%97 = vector.shuffle %73, %81 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%subview_4 = memref.subview %subview[0, 0, 0, 0, 0] [%15, 60, 128, 16, 1] [1, 1, 1, 1, 1] : memref<?x60x128x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
vector.store %82, %subview_4[%arg3, %arg4, %arg5, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%98 = affine.apply affine_map<(d0) -> (d0 + 1)>(%arg5)
vector.store %83, %subview_4[%arg3, %arg4, %98, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%99 = affine.apply affine_map<(d0) -> (d0 + 2)>(%arg5)
vector.store %84, %subview_4[%arg3, %arg4, %99, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%100 = affine.apply affine_map<(d0) -> (d0 + 3)>(%arg5)
vector.store %85, %subview_4[%arg3, %arg4, %100, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%101 = affine.apply affine_map<(d0) -> (d0 + 4)>(%arg5)
vector.store %86, %subview_4[%arg3, %arg4, %101, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%102 = affine.apply affine_map<(d0) -> (d0 + 5)>(%arg5)
vector.store %87, %subview_4[%arg3, %arg4, %102, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%103 = affine.apply affine_map<(d0) -> (d0 + 6)>(%arg5)
vector.store %88, %subview_4[%arg3, %arg4, %103, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%104 = affine.apply affine_map<(d0) -> (d0 + 7)>(%arg5)
vector.store %89, %subview_4[%arg3, %arg4, %104, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%105 = affine.apply affine_map<(d0) -> (d0 + 8)>(%arg5)
vector.store %90, %subview_4[%arg3, %arg4, %105, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%106 = affine.apply affine_map<(d0) -> (d0 + 9)>(%arg5)
vector.store %91, %subview_4[%arg3, %arg4, %106, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%107 = affine.apply affine_map<(d0) -> (d0 + 10)>(%arg5)
vector.store %92, %subview_4[%arg3, %arg4, %107, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%108 = affine.apply affine_map<(d0) -> (d0 + 11)>(%arg5)
vector.store %93, %subview_4[%arg3, %arg4, %108, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%109 = affine.apply affine_map<(d0) -> (d0 + 12)>(%arg5)
vector.store %94, %subview_4[%arg3, %arg4, %109, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%110 = affine.apply affine_map<(d0) -> (d0 + 13)>(%arg5)
vector.store %95, %subview_4[%arg3, %arg4, %110, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%111 = affine.apply affine_map<(d0) -> (d0 + 14)>(%arg5)
vector.store %96, %subview_4[%arg3, %arg4, %111, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%112 = affine.apply affine_map<(d0) -> (d0 + 15)>(%arg5)
vector.store %97, %subview_4[%arg3, %arg4, %112, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
}
}
}
}
}
}
return
}
// -----// IR Dump After LLVMCPUVectorShapeCastLowering (iree-llvmcpu-vector-shape-cast-lowering) //----- //
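// (annotation, not compiler output) No vector.shape_cast ops remain after canonicalization, so this pass leaves the function unchanged from the previous dump.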
func.func @pack_dispatch_0_pack_f16() attributes {translation_info = #iree_codegen.translation_info<CPUDataTiling>} {
%c15 = arith.constant 15 : index
%c14 = arith.constant 14 : index
%c13 = arith.constant 13 : index
%c12 = arith.constant 12 : index
%c11 = arith.constant 11 : index
%c10 = arith.constant 10 : index
%c9 = arith.constant 9 : index
%c8 = arith.constant 8 : index
%c7 = arith.constant 7 : index
%c6 = arith.constant 6 : index
%c5 = arith.constant 5 : index
%c4 = arith.constant 4 : index
%c3 = arith.constant 3 : index
%c2 = arith.constant 2 : index
%c16 = arith.constant 16 : index
%c128 = arith.constant 128 : index
%c60 = arith.constant 60 : index
%c1 = arith.constant 1 : index
%c3200 = arith.constant 3200 : index
%c540 = arith.constant 540 : index
%c0 = arith.constant 0 : index
%c32_i64 = arith.constant 32 : i64
%0 = hal.interface.constant.load[0] : i32
%1 = hal.interface.constant.load[1] : i32
%2 = arith.extui %0 : i32 to i64
%3 = arith.extui %1 : i32 to i64
%4 = arith.shli %3, %c32_i64 : i64
%5 = arith.ori %2, %4 : i64
%6 = arith.index_castui %5 : i64 to index
%7 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<?x8640x3200xf16, #hal.descriptor_type<storage_buffer>>{%6}
memref.assume_alignment %7, 64 : memref<?x8640x3200xf16, #hal.descriptor_type<storage_buffer>>
%8 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>>{%6}
memref.assume_alignment %8, 64 : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>>
%workgroup_id_x = hal.interface.workgroup.id[0] : index
%workgroup_count_x = hal.interface.workgroup.count[0] : index
%workgroup_id_y = hal.interface.workgroup.id[1] : index
%workgroup_count_y = hal.interface.workgroup.count[1] : index
%workgroup_id_z = hal.interface.workgroup.id[2] : index
%workgroup_count_z = hal.interface.workgroup.count[2] : index
%9 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_z]
%10 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_z]
%11 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_id_y]
%12 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_count_y]
%13 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_id_x]
%14 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_count_x]
scf.for %arg0 = %9 to %6 step %10 {
%15 = affine.min affine_map<(d0)[s0] -> (-d0 + s0, 64)>(%arg0)[%6]
scf.for %arg1 = %11 to %c540 step %12 {
%16 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg1)
scf.for %arg2 = %13 to %c3200 step %14 {
%subview = memref.subview %8[%arg0, %arg1, %arg2, 0, 0] [%15, 60, 128, 16, 1] [1, 1, 1, 1, 1] : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>> to memref<?x60x128x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_0 = memref.subview %7[%arg0, %16, %arg2] [%15, 960, 128] [1, 1, 1] : memref<?x8640x3200xf16, #hal.descriptor_type<storage_buffer>> to memref<?x960x128xf16, strided<[27648000, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
scf.for %arg3 = %c0 to %15 step %c1 {
scf.for %arg4 = %c0 to %c60 step %c1 {
%17 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg4)
scf.for %arg5 = %c0 to %c128 step %c16 {
%subview_1 = memref.subview %subview_0[%arg3, %17, %arg5] [1, 16, 16] [1, 1, 1] : memref<?x960x128xf16, strided<[27648000, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<1x16x16xf16, strided<[27648000, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%expand_shape = memref.expand_shape %subview_1 [[0], [1, 2], [3, 4]] : memref<1x16x16xf16, strided<[27648000, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> into memref<1x1x16x16x1xf16, strided<[27648000, 51200, 3200, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_2 = memref.subview %expand_shape[0, 0, 0, 0, 0] [1, 1, 16, 16, 1] [1, 1, 1, 1, 1] : memref<1x1x16x16x1xf16, strided<[27648000, 51200, 3200, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<1x1x16x16xf16, strided<[27648000, 51200, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_3 = memref.subview %subview_2[0, 0, 0, 0] [1, 1, 16, 16] [1, 1, 1, 1] : memref<1x1x16x16xf16, strided<[27648000, 51200, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>
%18 = vector.load %subview_3[%c0, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%19 = vector.load %subview_3[%c1, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%20 = vector.load %subview_3[%c2, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%21 = vector.load %subview_3[%c3, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%22 = vector.load %subview_3[%c4, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%23 = vector.load %subview_3[%c5, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%24 = vector.load %subview_3[%c6, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%25 = vector.load %subview_3[%c7, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%26 = vector.load %subview_3[%c8, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%27 = vector.load %subview_3[%c9, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%28 = vector.load %subview_3[%c10, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%29 = vector.load %subview_3[%c11, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%30 = vector.load %subview_3[%c12, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%31 = vector.load %subview_3[%c13, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%32 = vector.load %subview_3[%c14, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%33 = vector.load %subview_3[%c15, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%34 = vector.shuffle %18, %19 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%35 = vector.shuffle %18, %19 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%36 = vector.shuffle %20, %21 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%37 = vector.shuffle %20, %21 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%38 = vector.shuffle %22, %23 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%39 = vector.shuffle %22, %23 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%40 = vector.shuffle %24, %25 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%41 = vector.shuffle %24, %25 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%42 = vector.shuffle %26, %27 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%43 = vector.shuffle %26, %27 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%44 = vector.shuffle %28, %29 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%45 = vector.shuffle %28, %29 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%46 = vector.shuffle %30, %31 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%47 = vector.shuffle %30, %31 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%48 = vector.shuffle %32, %33 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%49 = vector.shuffle %32, %33 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%50 = vector.shuffle %34, %36 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%51 = vector.shuffle %34, %36 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%52 = vector.shuffle %35, %37 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%53 = vector.shuffle %35, %37 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%54 = vector.shuffle %38, %40 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%55 = vector.shuffle %38, %40 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%56 = vector.shuffle %39, %41 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%57 = vector.shuffle %39, %41 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%58 = vector.shuffle %42, %44 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%59 = vector.shuffle %42, %44 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%60 = vector.shuffle %43, %45 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%61 = vector.shuffle %43, %45 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%62 = vector.shuffle %46, %48 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%63 = vector.shuffle %46, %48 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%64 = vector.shuffle %47, %49 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%65 = vector.shuffle %47, %49 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%66 = vector.shuffle %50, %54 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%67 = vector.shuffle %51, %55 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%68 = vector.shuffle %52, %56 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%69 = vector.shuffle %53, %57 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%70 = vector.shuffle %50, %54 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%71 = vector.shuffle %51, %55 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%72 = vector.shuffle %52, %56 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%73 = vector.shuffle %53, %57 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%74 = vector.shuffle %58, %62 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%75 = vector.shuffle %59, %63 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%76 = vector.shuffle %60, %64 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%77 = vector.shuffle %61, %65 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%78 = vector.shuffle %58, %62 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%79 = vector.shuffle %59, %63 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%80 = vector.shuffle %60, %64 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%81 = vector.shuffle %61, %65 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%82 = vector.shuffle %66, %74 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%83 = vector.shuffle %67, %75 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%84 = vector.shuffle %68, %76 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%85 = vector.shuffle %69, %77 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%86 = vector.shuffle %70, %78 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%87 = vector.shuffle %71, %79 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%88 = vector.shuffle %72, %80 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%89 = vector.shuffle %73, %81 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%90 = vector.shuffle %66, %74 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%91 = vector.shuffle %67, %75 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%92 = vector.shuffle %68, %76 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%93 = vector.shuffle %69, %77 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%94 = vector.shuffle %70, %78 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%95 = vector.shuffle %71, %79 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%96 = vector.shuffle %72, %80 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%97 = vector.shuffle %73, %81 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%subview_4 = memref.subview %subview[0, 0, 0, 0, 0] [%15, 60, 128, 16, 1] [1, 1, 1, 1, 1] : memref<?x60x128x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
vector.store %82, %subview_4[%arg3, %arg4, %arg5, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%98 = affine.apply affine_map<(d0) -> (d0 + 1)>(%arg5)
vector.store %83, %subview_4[%arg3, %arg4, %98, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%99 = affine.apply affine_map<(d0) -> (d0 + 2)>(%arg5)
vector.store %84, %subview_4[%arg3, %arg4, %99, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%100 = affine.apply affine_map<(d0) -> (d0 + 3)>(%arg5)
vector.store %85, %subview_4[%arg3, %arg4, %100, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%101 = affine.apply affine_map<(d0) -> (d0 + 4)>(%arg5)
vector.store %86, %subview_4[%arg3, %arg4, %101, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%102 = affine.apply affine_map<(d0) -> (d0 + 5)>(%arg5)
vector.store %87, %subview_4[%arg3, %arg4, %102, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%103 = affine.apply affine_map<(d0) -> (d0 + 6)>(%arg5)
vector.store %88, %subview_4[%arg3, %arg4, %103, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%104 = affine.apply affine_map<(d0) -> (d0 + 7)>(%arg5)
vector.store %89, %subview_4[%arg3, %arg4, %104, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%105 = affine.apply affine_map<(d0) -> (d0 + 8)>(%arg5)
vector.store %90, %subview_4[%arg3, %arg4, %105, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%106 = affine.apply affine_map<(d0) -> (d0 + 9)>(%arg5)
vector.store %91, %subview_4[%arg3, %arg4, %106, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%107 = affine.apply affine_map<(d0) -> (d0 + 10)>(%arg5)
vector.store %92, %subview_4[%arg3, %arg4, %107, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%108 = affine.apply affine_map<(d0) -> (d0 + 11)>(%arg5)
vector.store %93, %subview_4[%arg3, %arg4, %108, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%109 = affine.apply affine_map<(d0) -> (d0 + 12)>(%arg5)
vector.store %94, %subview_4[%arg3, %arg4, %109, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%110 = affine.apply affine_map<(d0) -> (d0 + 13)>(%arg5)
vector.store %95, %subview_4[%arg3, %arg4, %110, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%111 = affine.apply affine_map<(d0) -> (d0 + 14)>(%arg5)
vector.store %96, %subview_4[%arg3, %arg4, %111, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%112 = affine.apply affine_map<(d0) -> (d0 + 15)>(%arg5)
vector.store %97, %subview_4[%arg3, %arg4, %112, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
}
}
}
}
}
}
return
}
// -----// IR Dump After LLVMCPULowerExecutableTarget (iree-llvmcpu-lower-executable-target) //----- //
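// Note: translation_info = #iree_codegen.translation_info<CPUDataTiling> selected the data-tiling
// lowering pipeline for this dispatch. By this point the pack has already been tiled into 16x16
// blocks and expanded into the vector.load / vector.shuffle / vector.store transpose sequence, so
// the function body below appears identical to the preceding dump.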
func.func @pack_dispatch_0_pack_f16() attributes {translation_info = #iree_codegen.translation_info<CPUDataTiling>} {
%c15 = arith.constant 15 : index
%c14 = arith.constant 14 : index
%c13 = arith.constant 13 : index
%c12 = arith.constant 12 : index
%c11 = arith.constant 11 : index
%c10 = arith.constant 10 : index
%c9 = arith.constant 9 : index
%c8 = arith.constant 8 : index
%c7 = arith.constant 7 : index
%c6 = arith.constant 6 : index
%c5 = arith.constant 5 : index
%c4 = arith.constant 4 : index
%c3 = arith.constant 3 : index
%c2 = arith.constant 2 : index
%c16 = arith.constant 16 : index
%c128 = arith.constant 128 : index
%c60 = arith.constant 60 : index
%c1 = arith.constant 1 : index
%c3200 = arith.constant 3200 : index
%c540 = arith.constant 540 : index
%c0 = arith.constant 0 : index
%c32_i64 = arith.constant 32 : i64
%0 = hal.interface.constant.load[0] : i32
%1 = hal.interface.constant.load[1] : i32
%2 = arith.extui %0 : i32 to i64
%3 = arith.extui %1 : i32 to i64
%4 = arith.shli %3, %c32_i64 : i64
%5 = arith.ori %2, %4 : i64
%6 = arith.index_castui %5 : i64 to index
%7 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<?x8640x3200xf16, #hal.descriptor_type<storage_buffer>>{%6}
memref.assume_alignment %7, 64 : memref<?x8640x3200xf16, #hal.descriptor_type<storage_buffer>>
%8 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>>{%6}
memref.assume_alignment %8, 64 : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>>
%workgroup_id_x = hal.interface.workgroup.id[0] : index
%workgroup_count_x = hal.interface.workgroup.count[0] : index
%workgroup_id_y = hal.interface.workgroup.id[1] : index
%workgroup_count_y = hal.interface.workgroup.count[1] : index
%workgroup_id_z = hal.interface.workgroup.id[2] : index
%workgroup_count_z = hal.interface.workgroup.count[2] : index
%9 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_z]
%10 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_z]
%11 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_id_y]
%12 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_count_y]
%13 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_id_x]
%14 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_count_x]
scf.for %arg0 = %9 to %6 step %10 {
%15 = affine.min affine_map<(d0)[s0] -> (-d0 + s0, 64)>(%arg0)[%6]
scf.for %arg1 = %11 to %c540 step %12 {
%16 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg1)
scf.for %arg2 = %13 to %c3200 step %14 {
%subview = memref.subview %8[%arg0, %arg1, %arg2, 0, 0] [%15, 60, 128, 16, 1] [1, 1, 1, 1, 1] : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>> to memref<?x60x128x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_0 = memref.subview %7[%arg0, %16, %arg2] [%15, 960, 128] [1, 1, 1] : memref<?x8640x3200xf16, #hal.descriptor_type<storage_buffer>> to memref<?x960x128xf16, strided<[27648000, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
scf.for %arg3 = %c0 to %15 step %c1 {
scf.for %arg4 = %c0 to %c60 step %c1 {
%17 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg4)
scf.for %arg5 = %c0 to %c128 step %c16 {
%subview_1 = memref.subview %subview_0[%arg3, %17, %arg5] [1, 16, 16] [1, 1, 1] : memref<?x960x128xf16, strided<[27648000, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<1x16x16xf16, strided<[27648000, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%expand_shape = memref.expand_shape %subview_1 [[0], [1, 2], [3, 4]] : memref<1x16x16xf16, strided<[27648000, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> into memref<1x1x16x16x1xf16, strided<[27648000, 51200, 3200, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_2 = memref.subview %expand_shape[0, 0, 0, 0, 0] [1, 1, 16, 16, 1] [1, 1, 1, 1, 1] : memref<1x1x16x16x1xf16, strided<[27648000, 51200, 3200, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<1x1x16x16xf16, strided<[27648000, 51200, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_3 = memref.subview %subview_2[0, 0, 0, 0] [1, 1, 16, 16] [1, 1, 1, 1] : memref<1x1x16x16xf16, strided<[27648000, 51200, 3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>
%18 = vector.load %subview_3[%c0, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%19 = vector.load %subview_3[%c1, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%20 = vector.load %subview_3[%c2, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%21 = vector.load %subview_3[%c3, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%22 = vector.load %subview_3[%c4, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%23 = vector.load %subview_3[%c5, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%24 = vector.load %subview_3[%c6, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%25 = vector.load %subview_3[%c7, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%26 = vector.load %subview_3[%c8, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%27 = vector.load %subview_3[%c9, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%28 = vector.load %subview_3[%c10, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%29 = vector.load %subview_3[%c11, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%30 = vector.load %subview_3[%c12, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%31 = vector.load %subview_3[%c13, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%32 = vector.load %subview_3[%c14, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%33 = vector.load %subview_3[%c15, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%34 = vector.shuffle %18, %19 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%35 = vector.shuffle %18, %19 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%36 = vector.shuffle %20, %21 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%37 = vector.shuffle %20, %21 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%38 = vector.shuffle %22, %23 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%39 = vector.shuffle %22, %23 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%40 = vector.shuffle %24, %25 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%41 = vector.shuffle %24, %25 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%42 = vector.shuffle %26, %27 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%43 = vector.shuffle %26, %27 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%44 = vector.shuffle %28, %29 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%45 = vector.shuffle %28, %29 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%46 = vector.shuffle %30, %31 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%47 = vector.shuffle %30, %31 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%48 = vector.shuffle %32, %33 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%49 = vector.shuffle %32, %33 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%50 = vector.shuffle %34, %36 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%51 = vector.shuffle %34, %36 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%52 = vector.shuffle %35, %37 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%53 = vector.shuffle %35, %37 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%54 = vector.shuffle %38, %40 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%55 = vector.shuffle %38, %40 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%56 = vector.shuffle %39, %41 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%57 = vector.shuffle %39, %41 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%58 = vector.shuffle %42, %44 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%59 = vector.shuffle %42, %44 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%60 = vector.shuffle %43, %45 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%61 = vector.shuffle %43, %45 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%62 = vector.shuffle %46, %48 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%63 = vector.shuffle %46, %48 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%64 = vector.shuffle %47, %49 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%65 = vector.shuffle %47, %49 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%66 = vector.shuffle %50, %54 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%67 = vector.shuffle %51, %55 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%68 = vector.shuffle %52, %56 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%69 = vector.shuffle %53, %57 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%70 = vector.shuffle %50, %54 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%71 = vector.shuffle %51, %55 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%72 = vector.shuffle %52, %56 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%73 = vector.shuffle %53, %57 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%74 = vector.shuffle %58, %62 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%75 = vector.shuffle %59, %63 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%76 = vector.shuffle %60, %64 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%77 = vector.shuffle %61, %65 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%78 = vector.shuffle %58, %62 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%79 = vector.shuffle %59, %63 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%80 = vector.shuffle %60, %64 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%81 = vector.shuffle %61, %65 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%82 = vector.shuffle %66, %74 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%83 = vector.shuffle %67, %75 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%84 = vector.shuffle %68, %76 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%85 = vector.shuffle %69, %77 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%86 = vector.shuffle %70, %78 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%87 = vector.shuffle %71, %79 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%88 = vector.shuffle %72, %80 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%89 = vector.shuffle %73, %81 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%90 = vector.shuffle %66, %74 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%91 = vector.shuffle %67, %75 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%92 = vector.shuffle %68, %76 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%93 = vector.shuffle %69, %77 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%94 = vector.shuffle %70, %78 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%95 = vector.shuffle %71, %79 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%96 = vector.shuffle %72, %80 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%97 = vector.shuffle %73, %81 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%subview_4 = memref.subview %subview[0, 0, 0, 0, 0] [%15, 60, 128, 16, 1] [1, 1, 1, 1, 1] : memref<?x60x128x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
vector.store %82, %subview_4[%arg3, %arg4, %arg5, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%98 = affine.apply affine_map<(d0) -> (d0 + 1)>(%arg5)
vector.store %83, %subview_4[%arg3, %arg4, %98, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%99 = affine.apply affine_map<(d0) -> (d0 + 2)>(%arg5)
vector.store %84, %subview_4[%arg3, %arg4, %99, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%100 = affine.apply affine_map<(d0) -> (d0 + 3)>(%arg5)
vector.store %85, %subview_4[%arg3, %arg4, %100, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%101 = affine.apply affine_map<(d0) -> (d0 + 4)>(%arg5)
vector.store %86, %subview_4[%arg3, %arg4, %101, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%102 = affine.apply affine_map<(d0) -> (d0 + 5)>(%arg5)
vector.store %87, %subview_4[%arg3, %arg4, %102, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%103 = affine.apply affine_map<(d0) -> (d0 + 6)>(%arg5)
vector.store %88, %subview_4[%arg3, %arg4, %103, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%104 = affine.apply affine_map<(d0) -> (d0 + 7)>(%arg5)
vector.store %89, %subview_4[%arg3, %arg4, %104, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%105 = affine.apply affine_map<(d0) -> (d0 + 8)>(%arg5)
vector.store %90, %subview_4[%arg3, %arg4, %105, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%106 = affine.apply affine_map<(d0) -> (d0 + 9)>(%arg5)
vector.store %91, %subview_4[%arg3, %arg4, %106, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%107 = affine.apply affine_map<(d0) -> (d0 + 10)>(%arg5)
vector.store %92, %subview_4[%arg3, %arg4, %107, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%108 = affine.apply affine_map<(d0) -> (d0 + 11)>(%arg5)
vector.store %93, %subview_4[%arg3, %arg4, %108, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%109 = affine.apply affine_map<(d0) -> (d0 + 12)>(%arg5)
vector.store %94, %subview_4[%arg3, %arg4, %109, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%110 = affine.apply affine_map<(d0) -> (d0 + 13)>(%arg5)
vector.store %95, %subview_4[%arg3, %arg4, %110, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%111 = affine.apply affine_map<(d0) -> (d0 + 14)>(%arg5)
vector.store %96, %subview_4[%arg3, %arg4, %111, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
%112 = affine.apply affine_map<(d0) -> (d0 + 15)>(%arg5)
vector.store %97, %subview_4[%arg3, %arg4, %112, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16xf16>
}
}
}
}
}
}
return
}
// -----// IR Dump After EraseHALDescriptorTypeFromMemRef (iree-codegen-erase-hal-descriptor-type-from-memref) //----- //
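// Note: relative to the previous dump, this pass only strips the #hal.descriptor_type<storage_buffer>
// memory space from the memref types; for example, the binding subspans below change from
//   memref<?x8640x3200xf16, #hal.descriptor_type<storage_buffer>>{%6}
// to
//   memref<?x8640x3200xf16>{%6}
// The loop nest and shuffle sequence are otherwise unchanged.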
func.func @pack_dispatch_0_pack_f16() attributes {translation_info = #iree_codegen.translation_info<CPUDataTiling>} {
%c15 = arith.constant 15 : index
%c14 = arith.constant 14 : index
%c13 = arith.constant 13 : index
%c12 = arith.constant 12 : index
%c11 = arith.constant 11 : index
%c10 = arith.constant 10 : index
%c9 = arith.constant 9 : index
%c8 = arith.constant 8 : index
%c7 = arith.constant 7 : index
%c6 = arith.constant 6 : index
%c5 = arith.constant 5 : index
%c4 = arith.constant 4 : index
%c3 = arith.constant 3 : index
%c2 = arith.constant 2 : index
%c16 = arith.constant 16 : index
%c128 = arith.constant 128 : index
%c60 = arith.constant 60 : index
%c1 = arith.constant 1 : index
%c3200 = arith.constant 3200 : index
%c540 = arith.constant 540 : index
%c0 = arith.constant 0 : index
%c32_i64 = arith.constant 32 : i64
%0 = hal.interface.constant.load[0] : i32
%1 = hal.interface.constant.load[1] : i32
%2 = arith.extui %0 : i32 to i64
%3 = arith.extui %1 : i32 to i64
%4 = arith.shli %3, %c32_i64 : i64
%5 = arith.ori %2, %4 : i64
%6 = arith.index_castui %5 : i64 to index
%7 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<?x8640x3200xf16>{%6}
memref.assume_alignment %7, 64 : memref<?x8640x3200xf16>
%8 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : memref<?x540x3200x16x1xf16>{%6}
memref.assume_alignment %8, 64 : memref<?x540x3200x16x1xf16>
%workgroup_id_x = hal.interface.workgroup.id[0] : index
%workgroup_count_x = hal.interface.workgroup.count[0] : index
%workgroup_id_y = hal.interface.workgroup.id[1] : index
%workgroup_count_y = hal.interface.workgroup.count[1] : index
%workgroup_id_z = hal.interface.workgroup.id[2] : index
%workgroup_count_z = hal.interface.workgroup.count[2] : index
%9 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_z]
%10 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_z]
%11 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_id_y]
%12 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_count_y]
%13 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_id_x]
%14 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_count_x]
scf.for %arg0 = %9 to %6 step %10 {
%15 = affine.min affine_map<(d0)[s0] -> (-d0 + s0, 64)>(%arg0)[%6]
scf.for %arg1 = %11 to %c540 step %12 {
%16 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg1)
scf.for %arg2 = %13 to %c3200 step %14 {
%subview = memref.subview %8[%arg0, %arg1, %arg2, 0, 0] [%15, 60, 128, 16, 1] [1, 1, 1, 1, 1] : memref<?x540x3200x16x1xf16> to memref<?x60x128x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>>
%subview_0 = memref.subview %7[%arg0, %16, %arg2] [%15, 960, 128] [1, 1, 1] : memref<?x8640x3200xf16> to memref<?x960x128xf16, strided<[27648000, 3200, 1], offset: ?>>
scf.for %arg3 = %c0 to %15 step %c1 {
scf.for %arg4 = %c0 to %c60 step %c1 {
%17 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg4)
scf.for %arg5 = %c0 to %c128 step %c16 {
%subview_1 = memref.subview %subview_0[%arg3, %17, %arg5] [1, 16, 16] [1, 1, 1] : memref<?x960x128xf16, strided<[27648000, 3200, 1], offset: ?>> to memref<1x16x16xf16, strided<[27648000, 3200, 1], offset: ?>>
%expand_shape = memref.expand_shape %subview_1 [[0], [1, 2], [3, 4]] : memref<1x16x16xf16, strided<[27648000, 3200, 1], offset: ?>> into memref<1x1x16x16x1xf16, strided<[27648000, 51200, 3200, 1, 1], offset: ?>>
%subview_2 = memref.subview %expand_shape[0, 0, 0, 0, 0] [1, 1, 16, 16, 1] [1, 1, 1, 1, 1] : memref<1x1x16x16x1xf16, strided<[27648000, 51200, 3200, 1, 1], offset: ?>> to memref<1x1x16x16xf16, strided<[27648000, 51200, 3200, 1], offset: ?>>
%subview_3 = memref.subview %subview_2[0, 0, 0, 0] [1, 1, 16, 16] [1, 1, 1, 1] : memref<1x1x16x16xf16, strided<[27648000, 51200, 3200, 1], offset: ?>> to memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>
%18 = vector.load %subview_3[%c0, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%19 = vector.load %subview_3[%c1, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%20 = vector.load %subview_3[%c2, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%21 = vector.load %subview_3[%c3, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%22 = vector.load %subview_3[%c4, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%23 = vector.load %subview_3[%c5, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%24 = vector.load %subview_3[%c6, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%25 = vector.load %subview_3[%c7, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%26 = vector.load %subview_3[%c8, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%27 = vector.load %subview_3[%c9, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%28 = vector.load %subview_3[%c10, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%29 = vector.load %subview_3[%c11, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%30 = vector.load %subview_3[%c12, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%31 = vector.load %subview_3[%c13, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%32 = vector.load %subview_3[%c14, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%33 = vector.load %subview_3[%c15, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%34 = vector.shuffle %18, %19 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%35 = vector.shuffle %18, %19 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%36 = vector.shuffle %20, %21 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%37 = vector.shuffle %20, %21 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%38 = vector.shuffle %22, %23 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%39 = vector.shuffle %22, %23 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%40 = vector.shuffle %24, %25 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%41 = vector.shuffle %24, %25 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%42 = vector.shuffle %26, %27 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%43 = vector.shuffle %26, %27 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%44 = vector.shuffle %28, %29 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%45 = vector.shuffle %28, %29 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%46 = vector.shuffle %30, %31 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%47 = vector.shuffle %30, %31 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%48 = vector.shuffle %32, %33 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%49 = vector.shuffle %32, %33 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%50 = vector.shuffle %34, %36 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%51 = vector.shuffle %34, %36 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%52 = vector.shuffle %35, %37 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%53 = vector.shuffle %35, %37 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%54 = vector.shuffle %38, %40 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%55 = vector.shuffle %38, %40 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%56 = vector.shuffle %39, %41 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%57 = vector.shuffle %39, %41 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%58 = vector.shuffle %42, %44 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%59 = vector.shuffle %42, %44 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%60 = vector.shuffle %43, %45 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%61 = vector.shuffle %43, %45 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%62 = vector.shuffle %46, %48 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%63 = vector.shuffle %46, %48 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%64 = vector.shuffle %47, %49 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%65 = vector.shuffle %47, %49 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%66 = vector.shuffle %50, %54 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%67 = vector.shuffle %51, %55 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%68 = vector.shuffle %52, %56 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%69 = vector.shuffle %53, %57 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%70 = vector.shuffle %50, %54 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%71 = vector.shuffle %51, %55 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%72 = vector.shuffle %52, %56 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%73 = vector.shuffle %53, %57 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%74 = vector.shuffle %58, %62 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%75 = vector.shuffle %59, %63 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%76 = vector.shuffle %60, %64 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%77 = vector.shuffle %61, %65 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%78 = vector.shuffle %58, %62 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%79 = vector.shuffle %59, %63 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%80 = vector.shuffle %60, %64 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%81 = vector.shuffle %61, %65 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%82 = vector.shuffle %66, %74 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%83 = vector.shuffle %67, %75 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%84 = vector.shuffle %68, %76 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%85 = vector.shuffle %69, %77 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%86 = vector.shuffle %70, %78 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%87 = vector.shuffle %71, %79 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%88 = vector.shuffle %72, %80 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%89 = vector.shuffle %73, %81 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%90 = vector.shuffle %66, %74 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%91 = vector.shuffle %67, %75 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%92 = vector.shuffle %68, %76 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%93 = vector.shuffle %69, %77 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%94 = vector.shuffle %70, %78 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%95 = vector.shuffle %71, %79 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%96 = vector.shuffle %72, %80 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%97 = vector.shuffle %73, %81 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%subview_4 = memref.subview %subview[0, 0, 0, 0, 0] [%15, 60, 128, 16, 1] [1, 1, 1, 1, 1] : memref<?x60x128x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>> to memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>
vector.store %82, %subview_4[%arg3, %arg4, %arg5, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%98 = affine.apply affine_map<(d0) -> (d0 + 1)>(%arg5)
vector.store %83, %subview_4[%arg3, %arg4, %98, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%99 = affine.apply affine_map<(d0) -> (d0 + 2)>(%arg5)
vector.store %84, %subview_4[%arg3, %arg4, %99, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%100 = affine.apply affine_map<(d0) -> (d0 + 3)>(%arg5)
vector.store %85, %subview_4[%arg3, %arg4, %100, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%101 = affine.apply affine_map<(d0) -> (d0 + 4)>(%arg5)
vector.store %86, %subview_4[%arg3, %arg4, %101, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%102 = affine.apply affine_map<(d0) -> (d0 + 5)>(%arg5)
vector.store %87, %subview_4[%arg3, %arg4, %102, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%103 = affine.apply affine_map<(d0) -> (d0 + 6)>(%arg5)
vector.store %88, %subview_4[%arg3, %arg4, %103, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%104 = affine.apply affine_map<(d0) -> (d0 + 7)>(%arg5)
vector.store %89, %subview_4[%arg3, %arg4, %104, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%105 = affine.apply affine_map<(d0) -> (d0 + 8)>(%arg5)
vector.store %90, %subview_4[%arg3, %arg4, %105, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%106 = affine.apply affine_map<(d0) -> (d0 + 9)>(%arg5)
vector.store %91, %subview_4[%arg3, %arg4, %106, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%107 = affine.apply affine_map<(d0) -> (d0 + 10)>(%arg5)
vector.store %92, %subview_4[%arg3, %arg4, %107, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%108 = affine.apply affine_map<(d0) -> (d0 + 11)>(%arg5)
vector.store %93, %subview_4[%arg3, %arg4, %108, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%109 = affine.apply affine_map<(d0) -> (d0 + 12)>(%arg5)
vector.store %94, %subview_4[%arg3, %arg4, %109, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%110 = affine.apply affine_map<(d0) -> (d0 + 13)>(%arg5)
vector.store %95, %subview_4[%arg3, %arg4, %110, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%111 = affine.apply affine_map<(d0) -> (d0 + 14)>(%arg5)
vector.store %96, %subview_4[%arg3, %arg4, %111, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%112 = affine.apply affine_map<(d0) -> (d0 + 15)>(%arg5)
vector.store %97, %subview_4[%arg3, %arg4, %112, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
}
}
}
}
}
}
return
}
// -----// IR Dump After LowerUKernelOpsToCalls (iree-codegen-lower-ukernel-ops-to-calls) //----- //
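// Note: this pass lowers iree_codegen micro-kernel (ukernel) ops into plain function calls. This
// pack dispatch contains no ukernel ops, so the dump below only adds the enclosing builtin module
// around what appears to be an unchanged function body.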
module {
func.func @pack_dispatch_0_pack_f16() attributes {translation_info = #iree_codegen.translation_info<CPUDataTiling>} {
%c15 = arith.constant 15 : index
%c14 = arith.constant 14 : index
%c13 = arith.constant 13 : index
%c12 = arith.constant 12 : index
%c11 = arith.constant 11 : index
%c10 = arith.constant 10 : index
%c9 = arith.constant 9 : index
%c8 = arith.constant 8 : index
%c7 = arith.constant 7 : index
%c6 = arith.constant 6 : index
%c5 = arith.constant 5 : index
%c4 = arith.constant 4 : index
%c3 = arith.constant 3 : index
%c2 = arith.constant 2 : index
%c16 = arith.constant 16 : index
%c128 = arith.constant 128 : index
%c60 = arith.constant 60 : index
%c1 = arith.constant 1 : index
%c3200 = arith.constant 3200 : index
%c540 = arith.constant 540 : index
%c0 = arith.constant 0 : index
%c32_i64 = arith.constant 32 : i64
%0 = hal.interface.constant.load[0] : i32
%1 = hal.interface.constant.load[1] : i32
%2 = arith.extui %0 : i32 to i64
%3 = arith.extui %1 : i32 to i64
%4 = arith.shli %3, %c32_i64 : i64
%5 = arith.ori %2, %4 : i64
%6 = arith.index_castui %5 : i64 to index
%7 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<?x8640x3200xf16>{%6}
memref.assume_alignment %7, 64 : memref<?x8640x3200xf16>
%8 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : memref<?x540x3200x16x1xf16>{%6}
memref.assume_alignment %8, 64 : memref<?x540x3200x16x1xf16>
%workgroup_id_x = hal.interface.workgroup.id[0] : index
%workgroup_count_x = hal.interface.workgroup.count[0] : index
%workgroup_id_y = hal.interface.workgroup.id[1] : index
%workgroup_count_y = hal.interface.workgroup.count[1] : index
%workgroup_id_z = hal.interface.workgroup.id[2] : index
%workgroup_count_z = hal.interface.workgroup.count[2] : index
%9 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_z]
%10 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_z]
%11 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_id_y]
%12 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_count_y]
%13 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_id_x]
%14 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_count_x]
scf.for %arg0 = %9 to %6 step %10 {
%15 = affine.min affine_map<(d0)[s0] -> (-d0 + s0, 64)>(%arg0)[%6]
scf.for %arg1 = %11 to %c540 step %12 {
%16 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg1)
scf.for %arg2 = %13 to %c3200 step %14 {
%subview = memref.subview %8[%arg0, %arg1, %arg2, 0, 0] [%15, 60, 128, 16, 1] [1, 1, 1, 1, 1] : memref<?x540x3200x16x1xf16> to memref<?x60x128x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>>
%subview_0 = memref.subview %7[%arg0, %16, %arg2] [%15, 960, 128] [1, 1, 1] : memref<?x8640x3200xf16> to memref<?x960x128xf16, strided<[27648000, 3200, 1], offset: ?>>
scf.for %arg3 = %c0 to %15 step %c1 {
scf.for %arg4 = %c0 to %c60 step %c1 {
%17 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg4)
scf.for %arg5 = %c0 to %c128 step %c16 {
%subview_1 = memref.subview %subview_0[%arg3, %17, %arg5] [1, 16, 16] [1, 1, 1] : memref<?x960x128xf16, strided<[27648000, 3200, 1], offset: ?>> to memref<1x16x16xf16, strided<[27648000, 3200, 1], offset: ?>>
%expand_shape = memref.expand_shape %subview_1 [[0], [1, 2], [3, 4]] : memref<1x16x16xf16, strided<[27648000, 3200, 1], offset: ?>> into memref<1x1x16x16x1xf16, strided<[27648000, 51200, 3200, 1, 1], offset: ?>>
%subview_2 = memref.subview %expand_shape[0, 0, 0, 0, 0] [1, 1, 16, 16, 1] [1, 1, 1, 1, 1] : memref<1x1x16x16x1xf16, strided<[27648000, 51200, 3200, 1, 1], offset: ?>> to memref<1x1x16x16xf16, strided<[27648000, 51200, 3200, 1], offset: ?>>
%subview_3 = memref.subview %subview_2[0, 0, 0, 0] [1, 1, 16, 16] [1, 1, 1, 1] : memref<1x1x16x16xf16, strided<[27648000, 51200, 3200, 1], offset: ?>> to memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>
%18 = vector.load %subview_3[%c0, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%19 = vector.load %subview_3[%c1, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%20 = vector.load %subview_3[%c2, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%21 = vector.load %subview_3[%c3, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%22 = vector.load %subview_3[%c4, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%23 = vector.load %subview_3[%c5, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%24 = vector.load %subview_3[%c6, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%25 = vector.load %subview_3[%c7, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%26 = vector.load %subview_3[%c8, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%27 = vector.load %subview_3[%c9, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%28 = vector.load %subview_3[%c10, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%29 = vector.load %subview_3[%c11, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%30 = vector.load %subview_3[%c12, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%31 = vector.load %subview_3[%c13, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%32 = vector.load %subview_3[%c14, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%33 = vector.load %subview_3[%c15, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%34 = vector.shuffle %18, %19 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%35 = vector.shuffle %18, %19 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%36 = vector.shuffle %20, %21 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%37 = vector.shuffle %20, %21 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%38 = vector.shuffle %22, %23 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%39 = vector.shuffle %22, %23 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%40 = vector.shuffle %24, %25 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%41 = vector.shuffle %24, %25 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%42 = vector.shuffle %26, %27 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%43 = vector.shuffle %26, %27 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%44 = vector.shuffle %28, %29 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%45 = vector.shuffle %28, %29 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%46 = vector.shuffle %30, %31 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%47 = vector.shuffle %30, %31 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%48 = vector.shuffle %32, %33 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%49 = vector.shuffle %32, %33 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%50 = vector.shuffle %34, %36 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%51 = vector.shuffle %34, %36 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%52 = vector.shuffle %35, %37 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%53 = vector.shuffle %35, %37 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%54 = vector.shuffle %38, %40 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%55 = vector.shuffle %38, %40 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%56 = vector.shuffle %39, %41 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%57 = vector.shuffle %39, %41 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%58 = vector.shuffle %42, %44 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%59 = vector.shuffle %42, %44 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%60 = vector.shuffle %43, %45 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%61 = vector.shuffle %43, %45 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%62 = vector.shuffle %46, %48 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%63 = vector.shuffle %46, %48 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%64 = vector.shuffle %47, %49 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%65 = vector.shuffle %47, %49 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%66 = vector.shuffle %50, %54 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%67 = vector.shuffle %51, %55 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%68 = vector.shuffle %52, %56 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%69 = vector.shuffle %53, %57 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%70 = vector.shuffle %50, %54 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%71 = vector.shuffle %51, %55 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%72 = vector.shuffle %52, %56 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%73 = vector.shuffle %53, %57 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%74 = vector.shuffle %58, %62 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%75 = vector.shuffle %59, %63 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%76 = vector.shuffle %60, %64 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%77 = vector.shuffle %61, %65 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%78 = vector.shuffle %58, %62 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%79 = vector.shuffle %59, %63 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%80 = vector.shuffle %60, %64 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%81 = vector.shuffle %61, %65 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%82 = vector.shuffle %66, %74 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%83 = vector.shuffle %67, %75 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%84 = vector.shuffle %68, %76 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%85 = vector.shuffle %69, %77 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%86 = vector.shuffle %70, %78 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%87 = vector.shuffle %71, %79 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%88 = vector.shuffle %72, %80 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%89 = vector.shuffle %73, %81 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%90 = vector.shuffle %66, %74 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%91 = vector.shuffle %67, %75 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%92 = vector.shuffle %68, %76 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%93 = vector.shuffle %69, %77 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%94 = vector.shuffle %70, %78 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%95 = vector.shuffle %71, %79 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%96 = vector.shuffle %72, %80 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%97 = vector.shuffle %73, %81 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%subview_4 = memref.subview %subview[0, 0, 0, 0, 0] [%15, 60, 128, 16, 1] [1, 1, 1, 1, 1] : memref<?x60x128x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>> to memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>
vector.store %82, %subview_4[%arg3, %arg4, %arg5, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%98 = affine.apply affine_map<(d0) -> (d0 + 1)>(%arg5)
vector.store %83, %subview_4[%arg3, %arg4, %98, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%99 = affine.apply affine_map<(d0) -> (d0 + 2)>(%arg5)
vector.store %84, %subview_4[%arg3, %arg4, %99, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%100 = affine.apply affine_map<(d0) -> (d0 + 3)>(%arg5)
vector.store %85, %subview_4[%arg3, %arg4, %100, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%101 = affine.apply affine_map<(d0) -> (d0 + 4)>(%arg5)
vector.store %86, %subview_4[%arg3, %arg4, %101, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%102 = affine.apply affine_map<(d0) -> (d0 + 5)>(%arg5)
vector.store %87, %subview_4[%arg3, %arg4, %102, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%103 = affine.apply affine_map<(d0) -> (d0 + 6)>(%arg5)
vector.store %88, %subview_4[%arg3, %arg4, %103, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%104 = affine.apply affine_map<(d0) -> (d0 + 7)>(%arg5)
vector.store %89, %subview_4[%arg3, %arg4, %104, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%105 = affine.apply affine_map<(d0) -> (d0 + 8)>(%arg5)
vector.store %90, %subview_4[%arg3, %arg4, %105, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%106 = affine.apply affine_map<(d0) -> (d0 + 9)>(%arg5)
vector.store %91, %subview_4[%arg3, %arg4, %106, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%107 = affine.apply affine_map<(d0) -> (d0 + 10)>(%arg5)
vector.store %92, %subview_4[%arg3, %arg4, %107, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%108 = affine.apply affine_map<(d0) -> (d0 + 11)>(%arg5)
vector.store %93, %subview_4[%arg3, %arg4, %108, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%109 = affine.apply affine_map<(d0) -> (d0 + 12)>(%arg5)
vector.store %94, %subview_4[%arg3, %arg4, %109, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%110 = affine.apply affine_map<(d0) -> (d0 + 13)>(%arg5)
vector.store %95, %subview_4[%arg3, %arg4, %110, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%111 = affine.apply affine_map<(d0) -> (d0 + 14)>(%arg5)
vector.store %96, %subview_4[%arg3, %arg4, %111, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%112 = affine.apply affine_map<(d0) -> (d0 + 15)>(%arg5)
vector.store %97, %subview_4[%arg3, %arg4, %112, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
}
}
}
}
}
}
return
}
}
// -----// IR Dump After LinalgExtToLoops (iree-linalg-ext-to-loops) //----- //
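// Note: the pack was already lowered to scf.for loops and vector ops in the earlier pipeline, so
// this pass appears to leave the function unchanged in the portion shown below.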
func.func @pack_dispatch_0_pack_f16() attributes {translation_info = #iree_codegen.translation_info<CPUDataTiling>} {
%c15 = arith.constant 15 : index
%c14 = arith.constant 14 : index
%c13 = arith.constant 13 : index
%c12 = arith.constant 12 : index
%c11 = arith.constant 11 : index
%c10 = arith.constant 10 : index
%c9 = arith.constant 9 : index
%c8 = arith.constant 8 : index
%c7 = arith.constant 7 : index
%c6 = arith.constant 6 : index
%c5 = arith.constant 5 : index
%c4 = arith.constant 4 : index
%c3 = arith.constant 3 : index
%c2 = arith.constant 2 : index
%c16 = arith.constant 16 : index
%c128 = arith.constant 128 : index
%c60 = arith.constant 60 : index
%c1 = arith.constant 1 : index
%c3200 = arith.constant 3200 : index
%c540 = arith.constant 540 : index
%c0 = arith.constant 0 : index
%c32_i64 = arith.constant 32 : i64
%0 = hal.interface.constant.load[0] : i32
%1 = hal.interface.constant.load[1] : i32
%2 = arith.extui %0 : i32 to i64
%3 = arith.extui %1 : i32 to i64
%4 = arith.shli %3, %c32_i64 : i64
%5 = arith.ori %2, %4 : i64
%6 = arith.index_castui %5 : i64 to index
%7 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<?x8640x3200xf16>{%6}
memref.assume_alignment %7, 64 : memref<?x8640x3200xf16>
%8 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : memref<?x540x3200x16x1xf16>{%6}
memref.assume_alignment %8, 64 : memref<?x540x3200x16x1xf16>
%workgroup_id_x = hal.interface.workgroup.id[0] : index
%workgroup_count_x = hal.interface.workgroup.count[0] : index
%workgroup_id_y = hal.interface.workgroup.id[1] : index
%workgroup_count_y = hal.interface.workgroup.count[1] : index
%workgroup_id_z = hal.interface.workgroup.id[2] : index
%workgroup_count_z = hal.interface.workgroup.count[2] : index
%9 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_z]
%10 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_z]
%11 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_id_y]
%12 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_count_y]
%13 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_id_x]
%14 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_count_x]
scf.for %arg0 = %9 to %6 step %10 {
%15 = affine.min affine_map<(d0)[s0] -> (-d0 + s0, 64)>(%arg0)[%6]
scf.for %arg1 = %11 to %c540 step %12 {
%16 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg1)
scf.for %arg2 = %13 to %c3200 step %14 {
%subview = memref.subview %8[%arg0, %arg1, %arg2, 0, 0] [%15, 60, 128, 16, 1] [1, 1, 1, 1, 1] : memref<?x540x3200x16x1xf16> to memref<?x60x128x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>>
%subview_0 = memref.subview %7[%arg0, %16, %arg2] [%15, 960, 128] [1, 1, 1] : memref<?x8640x3200xf16> to memref<?x960x128xf16, strided<[27648000, 3200, 1], offset: ?>>
scf.for %arg3 = %c0 to %15 step %c1 {
scf.for %arg4 = %c0 to %c60 step %c1 {
%17 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg4)
scf.for %arg5 = %c0 to %c128 step %c16 {
%subview_1 = memref.subview %subview_0[%arg3, %17, %arg5] [1, 16, 16] [1, 1, 1] : memref<?x960x128xf16, strided<[27648000, 3200, 1], offset: ?>> to memref<1x16x16xf16, strided<[27648000, 3200, 1], offset: ?>>
%expand_shape = memref.expand_shape %subview_1 [[0], [1, 2], [3, 4]] : memref<1x16x16xf16, strided<[27648000, 3200, 1], offset: ?>> into memref<1x1x16x16x1xf16, strided<[27648000, 51200, 3200, 1, 1], offset: ?>>
%subview_2 = memref.subview %expand_shape[0, 0, 0, 0, 0] [1, 1, 16, 16, 1] [1, 1, 1, 1, 1] : memref<1x1x16x16x1xf16, strided<[27648000, 51200, 3200, 1, 1], offset: ?>> to memref<1x1x16x16xf16, strided<[27648000, 51200, 3200, 1], offset: ?>>
%subview_3 = memref.subview %subview_2[0, 0, 0, 0] [1, 1, 16, 16] [1, 1, 1, 1] : memref<1x1x16x16xf16, strided<[27648000, 51200, 3200, 1], offset: ?>> to memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>
%18 = vector.load %subview_3[%c0, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%19 = vector.load %subview_3[%c1, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%20 = vector.load %subview_3[%c2, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%21 = vector.load %subview_3[%c3, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%22 = vector.load %subview_3[%c4, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%23 = vector.load %subview_3[%c5, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%24 = vector.load %subview_3[%c6, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%25 = vector.load %subview_3[%c7, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%26 = vector.load %subview_3[%c8, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%27 = vector.load %subview_3[%c9, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%28 = vector.load %subview_3[%c10, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%29 = vector.load %subview_3[%c11, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%30 = vector.load %subview_3[%c12, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%31 = vector.load %subview_3[%c13, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%32 = vector.load %subview_3[%c14, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%33 = vector.load %subview_3[%c15, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%34 = vector.shuffle %18, %19 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%35 = vector.shuffle %18, %19 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%36 = vector.shuffle %20, %21 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%37 = vector.shuffle %20, %21 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%38 = vector.shuffle %22, %23 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%39 = vector.shuffle %22, %23 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%40 = vector.shuffle %24, %25 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%41 = vector.shuffle %24, %25 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%42 = vector.shuffle %26, %27 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%43 = vector.shuffle %26, %27 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%44 = vector.shuffle %28, %29 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%45 = vector.shuffle %28, %29 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%46 = vector.shuffle %30, %31 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%47 = vector.shuffle %30, %31 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%48 = vector.shuffle %32, %33 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%49 = vector.shuffle %32, %33 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%50 = vector.shuffle %34, %36 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%51 = vector.shuffle %34, %36 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%52 = vector.shuffle %35, %37 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%53 = vector.shuffle %35, %37 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%54 = vector.shuffle %38, %40 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%55 = vector.shuffle %38, %40 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%56 = vector.shuffle %39, %41 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%57 = vector.shuffle %39, %41 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%58 = vector.shuffle %42, %44 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%59 = vector.shuffle %42, %44 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%60 = vector.shuffle %43, %45 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%61 = vector.shuffle %43, %45 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%62 = vector.shuffle %46, %48 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%63 = vector.shuffle %46, %48 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%64 = vector.shuffle %47, %49 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%65 = vector.shuffle %47, %49 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%66 = vector.shuffle %50, %54 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%67 = vector.shuffle %51, %55 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%68 = vector.shuffle %52, %56 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%69 = vector.shuffle %53, %57 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%70 = vector.shuffle %50, %54 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%71 = vector.shuffle %51, %55 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%72 = vector.shuffle %52, %56 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%73 = vector.shuffle %53, %57 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%74 = vector.shuffle %58, %62 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%75 = vector.shuffle %59, %63 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%76 = vector.shuffle %60, %64 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%77 = vector.shuffle %61, %65 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%78 = vector.shuffle %58, %62 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%79 = vector.shuffle %59, %63 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%80 = vector.shuffle %60, %64 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%81 = vector.shuffle %61, %65 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%82 = vector.shuffle %66, %74 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%83 = vector.shuffle %67, %75 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%84 = vector.shuffle %68, %76 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%85 = vector.shuffle %69, %77 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%86 = vector.shuffle %70, %78 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%87 = vector.shuffle %71, %79 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%88 = vector.shuffle %72, %80 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%89 = vector.shuffle %73, %81 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%90 = vector.shuffle %66, %74 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%91 = vector.shuffle %67, %75 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%92 = vector.shuffle %68, %76 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%93 = vector.shuffle %69, %77 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%94 = vector.shuffle %70, %78 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%95 = vector.shuffle %71, %79 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%96 = vector.shuffle %72, %80 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%97 = vector.shuffle %73, %81 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%subview_4 = memref.subview %subview[0, 0, 0, 0, 0] [%15, 60, 128, 16, 1] [1, 1, 1, 1, 1] : memref<?x60x128x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>> to memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>
vector.store %82, %subview_4[%arg3, %arg4, %arg5, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%98 = affine.apply affine_map<(d0) -> (d0 + 1)>(%arg5)
vector.store %83, %subview_4[%arg3, %arg4, %98, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%99 = affine.apply affine_map<(d0) -> (d0 + 2)>(%arg5)
vector.store %84, %subview_4[%arg3, %arg4, %99, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%100 = affine.apply affine_map<(d0) -> (d0 + 3)>(%arg5)
vector.store %85, %subview_4[%arg3, %arg4, %100, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%101 = affine.apply affine_map<(d0) -> (d0 + 4)>(%arg5)
vector.store %86, %subview_4[%arg3, %arg4, %101, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%102 = affine.apply affine_map<(d0) -> (d0 + 5)>(%arg5)
vector.store %87, %subview_4[%arg3, %arg4, %102, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%103 = affine.apply affine_map<(d0) -> (d0 + 6)>(%arg5)
vector.store %88, %subview_4[%arg3, %arg4, %103, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%104 = affine.apply affine_map<(d0) -> (d0 + 7)>(%arg5)
vector.store %89, %subview_4[%arg3, %arg4, %104, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%105 = affine.apply affine_map<(d0) -> (d0 + 8)>(%arg5)
vector.store %90, %subview_4[%arg3, %arg4, %105, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%106 = affine.apply affine_map<(d0) -> (d0 + 9)>(%arg5)
vector.store %91, %subview_4[%arg3, %arg4, %106, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%107 = affine.apply affine_map<(d0) -> (d0 + 10)>(%arg5)
vector.store %92, %subview_4[%arg3, %arg4, %107, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%108 = affine.apply affine_map<(d0) -> (d0 + 11)>(%arg5)
vector.store %93, %subview_4[%arg3, %arg4, %108, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%109 = affine.apply affine_map<(d0) -> (d0 + 12)>(%arg5)
vector.store %94, %subview_4[%arg3, %arg4, %109, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%110 = affine.apply affine_map<(d0) -> (d0 + 13)>(%arg5)
vector.store %95, %subview_4[%arg3, %arg4, %110, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%111 = affine.apply affine_map<(d0) -> (d0 + 14)>(%arg5)
vector.store %96, %subview_4[%arg3, %arg4, %111, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%112 = affine.apply affine_map<(d0) -> (d0 + 15)>(%arg5)
vector.store %97, %subview_4[%arg3, %arg4, %112, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
}
}
}
}
}
}
return
}
// -----// IR Dump After MemrefCopyToLinalgPass (iree-codegen-memrefcopy-to-linalg) //----- //
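// Note: iree-codegen-memrefcopy-to-linalg rewrites memref.copy ops into linalg copy operations so
// later passes can treat them uniformly. There is no memref.copy in this dispatch, so the dump
// below is unchanged. As a generic illustration of what the pass targets (assumed shapes, not taken
// from this dispatch):
//   memref.copy %src, %dst : memref<16x16xf16> to memref<16x16xf16>
// would be rewritten into an equivalent linalg op (e.g. a linalg.generic copy) over the same
// memrefs; here it is a no-op.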
func.func @pack_dispatch_0_pack_f16() attributes {translation_info = #iree_codegen.translation_info<CPUDataTiling>} {
%c15 = arith.constant 15 : index
%c14 = arith.constant 14 : index
%c13 = arith.constant 13 : index
%c12 = arith.constant 12 : index
%c11 = arith.constant 11 : index
%c10 = arith.constant 10 : index
%c9 = arith.constant 9 : index
%c8 = arith.constant 8 : index
%c7 = arith.constant 7 : index
%c6 = arith.constant 6 : index
%c5 = arith.constant 5 : index
%c4 = arith.constant 4 : index
%c3 = arith.constant 3 : index
%c2 = arith.constant 2 : index
%c16 = arith.constant 16 : index
%c128 = arith.constant 128 : index
%c60 = arith.constant 60 : index
%c1 = arith.constant 1 : index
%c3200 = arith.constant 3200 : index
%c540 = arith.constant 540 : index
%c0 = arith.constant 0 : index
%c32_i64 = arith.constant 32 : i64
%0 = hal.interface.constant.load[0] : i32
%1 = hal.interface.constant.load[1] : i32
%2 = arith.extui %0 : i32 to i64
%3 = arith.extui %1 : i32 to i64
%4 = arith.shli %3, %c32_i64 : i64
%5 = arith.ori %2, %4 : i64
%6 = arith.index_castui %5 : i64 to index
%7 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<?x8640x3200xf16>{%6}
memref.assume_alignment %7, 64 : memref<?x8640x3200xf16>
%8 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : memref<?x540x3200x16x1xf16>{%6}
memref.assume_alignment %8, 64 : memref<?x540x3200x16x1xf16>
%workgroup_id_x = hal.interface.workgroup.id[0] : index
%workgroup_count_x = hal.interface.workgroup.count[0] : index
%workgroup_id_y = hal.interface.workgroup.id[1] : index
%workgroup_count_y = hal.interface.workgroup.count[1] : index
%workgroup_id_z = hal.interface.workgroup.id[2] : index
%workgroup_count_z = hal.interface.workgroup.count[2] : index
%9 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_z]
%10 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_z]
%11 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_id_y]
%12 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_count_y]
%13 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_id_x]
%14 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_count_x]
scf.for %arg0 = %9 to %6 step %10 {
%15 = affine.min affine_map<(d0)[s0] -> (-d0 + s0, 64)>(%arg0)[%6]
scf.for %arg1 = %11 to %c540 step %12 {
%16 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg1)
scf.for %arg2 = %13 to %c3200 step %14 {
%subview = memref.subview %8[%arg0, %arg1, %arg2, 0, 0] [%15, 60, 128, 16, 1] [1, 1, 1, 1, 1] : memref<?x540x3200x16x1xf16> to memref<?x60x128x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>>
%subview_0 = memref.subview %7[%arg0, %16, %arg2] [%15, 960, 128] [1, 1, 1] : memref<?x8640x3200xf16> to memref<?x960x128xf16, strided<[27648000, 3200, 1], offset: ?>>
scf.for %arg3 = %c0 to %15 step %c1 {
scf.for %arg4 = %c0 to %c60 step %c1 {
%17 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg4)
scf.for %arg5 = %c0 to %c128 step %c16 {
%subview_1 = memref.subview %subview_0[%arg3, %17, %arg5] [1, 16, 16] [1, 1, 1] : memref<?x960x128xf16, strided<[27648000, 3200, 1], offset: ?>> to memref<1x16x16xf16, strided<[27648000, 3200, 1], offset: ?>>
%expand_shape = memref.expand_shape %subview_1 [[0], [1, 2], [3, 4]] : memref<1x16x16xf16, strided<[27648000, 3200, 1], offset: ?>> into memref<1x1x16x16x1xf16, strided<[27648000, 51200, 3200, 1, 1], offset: ?>>
%subview_2 = memref.subview %expand_shape[0, 0, 0, 0, 0] [1, 1, 16, 16, 1] [1, 1, 1, 1, 1] : memref<1x1x16x16x1xf16, strided<[27648000, 51200, 3200, 1, 1], offset: ?>> to memref<1x1x16x16xf16, strided<[27648000, 51200, 3200, 1], offset: ?>>
%subview_3 = memref.subview %subview_2[0, 0, 0, 0] [1, 1, 16, 16] [1, 1, 1, 1] : memref<1x1x16x16xf16, strided<[27648000, 51200, 3200, 1], offset: ?>> to memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>
%18 = vector.load %subview_3[%c0, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%19 = vector.load %subview_3[%c1, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%20 = vector.load %subview_3[%c2, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%21 = vector.load %subview_3[%c3, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%22 = vector.load %subview_3[%c4, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%23 = vector.load %subview_3[%c5, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%24 = vector.load %subview_3[%c6, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%25 = vector.load %subview_3[%c7, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%26 = vector.load %subview_3[%c8, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%27 = vector.load %subview_3[%c9, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%28 = vector.load %subview_3[%c10, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%29 = vector.load %subview_3[%c11, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%30 = vector.load %subview_3[%c12, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%31 = vector.load %subview_3[%c13, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%32 = vector.load %subview_3[%c14, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%33 = vector.load %subview_3[%c15, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%34 = vector.shuffle %18, %19 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%35 = vector.shuffle %18, %19 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%36 = vector.shuffle %20, %21 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%37 = vector.shuffle %20, %21 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%38 = vector.shuffle %22, %23 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%39 = vector.shuffle %22, %23 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%40 = vector.shuffle %24, %25 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%41 = vector.shuffle %24, %25 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%42 = vector.shuffle %26, %27 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%43 = vector.shuffle %26, %27 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%44 = vector.shuffle %28, %29 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%45 = vector.shuffle %28, %29 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%46 = vector.shuffle %30, %31 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%47 = vector.shuffle %30, %31 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%48 = vector.shuffle %32, %33 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%49 = vector.shuffle %32, %33 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%50 = vector.shuffle %34, %36 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%51 = vector.shuffle %34, %36 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%52 = vector.shuffle %35, %37 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%53 = vector.shuffle %35, %37 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%54 = vector.shuffle %38, %40 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%55 = vector.shuffle %38, %40 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%56 = vector.shuffle %39, %41 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%57 = vector.shuffle %39, %41 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%58 = vector.shuffle %42, %44 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%59 = vector.shuffle %42, %44 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%60 = vector.shuffle %43, %45 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%61 = vector.shuffle %43, %45 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%62 = vector.shuffle %46, %48 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%63 = vector.shuffle %46, %48 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%64 = vector.shuffle %47, %49 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%65 = vector.shuffle %47, %49 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%66 = vector.shuffle %50, %54 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%67 = vector.shuffle %51, %55 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%68 = vector.shuffle %52, %56 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%69 = vector.shuffle %53, %57 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%70 = vector.shuffle %50, %54 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%71 = vector.shuffle %51, %55 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%72 = vector.shuffle %52, %56 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%73 = vector.shuffle %53, %57 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%74 = vector.shuffle %58, %62 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%75 = vector.shuffle %59, %63 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%76 = vector.shuffle %60, %64 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%77 = vector.shuffle %61, %65 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%78 = vector.shuffle %58, %62 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%79 = vector.shuffle %59, %63 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%80 = vector.shuffle %60, %64 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%81 = vector.shuffle %61, %65 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%82 = vector.shuffle %66, %74 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%83 = vector.shuffle %67, %75 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%84 = vector.shuffle %68, %76 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%85 = vector.shuffle %69, %77 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%86 = vector.shuffle %70, %78 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%87 = vector.shuffle %71, %79 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%88 = vector.shuffle %72, %80 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%89 = vector.shuffle %73, %81 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%90 = vector.shuffle %66, %74 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%91 = vector.shuffle %67, %75 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%92 = vector.shuffle %68, %76 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%93 = vector.shuffle %69, %77 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%94 = vector.shuffle %70, %78 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%95 = vector.shuffle %71, %79 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%96 = vector.shuffle %72, %80 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%97 = vector.shuffle %73, %81 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%subview_4 = memref.subview %subview[0, 0, 0, 0, 0] [%15, 60, 128, 16, 1] [1, 1, 1, 1, 1] : memref<?x60x128x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>> to memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>
vector.store %82, %subview_4[%arg3, %arg4, %arg5, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%98 = affine.apply affine_map<(d0) -> (d0 + 1)>(%arg5)
vector.store %83, %subview_4[%arg3, %arg4, %98, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%99 = affine.apply affine_map<(d0) -> (d0 + 2)>(%arg5)
vector.store %84, %subview_4[%arg3, %arg4, %99, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%100 = affine.apply affine_map<(d0) -> (d0 + 3)>(%arg5)
vector.store %85, %subview_4[%arg3, %arg4, %100, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%101 = affine.apply affine_map<(d0) -> (d0 + 4)>(%arg5)
vector.store %86, %subview_4[%arg3, %arg4, %101, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%102 = affine.apply affine_map<(d0) -> (d0 + 5)>(%arg5)
vector.store %87, %subview_4[%arg3, %arg4, %102, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%103 = affine.apply affine_map<(d0) -> (d0 + 6)>(%arg5)
vector.store %88, %subview_4[%arg3, %arg4, %103, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%104 = affine.apply affine_map<(d0) -> (d0 + 7)>(%arg5)
vector.store %89, %subview_4[%arg3, %arg4, %104, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%105 = affine.apply affine_map<(d0) -> (d0 + 8)>(%arg5)
vector.store %90, %subview_4[%arg3, %arg4, %105, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%106 = affine.apply affine_map<(d0) -> (d0 + 9)>(%arg5)
vector.store %91, %subview_4[%arg3, %arg4, %106, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%107 = affine.apply affine_map<(d0) -> (d0 + 10)>(%arg5)
vector.store %92, %subview_4[%arg3, %arg4, %107, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%108 = affine.apply affine_map<(d0) -> (d0 + 11)>(%arg5)
vector.store %93, %subview_4[%arg3, %arg4, %108, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%109 = affine.apply affine_map<(d0) -> (d0 + 12)>(%arg5)
vector.store %94, %subview_4[%arg3, %arg4, %109, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%110 = affine.apply affine_map<(d0) -> (d0 + 13)>(%arg5)
vector.store %95, %subview_4[%arg3, %arg4, %110, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%111 = affine.apply affine_map<(d0) -> (d0 + 14)>(%arg5)
vector.store %96, %subview_4[%arg3, %arg4, %111, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%112 = affine.apply affine_map<(d0) -> (d0 + 15)>(%arg5)
vector.store %97, %subview_4[%arg3, %arg4, %112, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
}
}
}
}
}
}
return
}
// -----// IR Dump After ConvertLinalgToLoopsPass (convert-linalg-to-loops) //----- //
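// Note: convert-linalg-to-loops lowers any remaining linalg ops on memrefs into scf.for loop nests.
// No linalg ops remain in this dispatch, so the IR below is again unchanged. A minimal generic
// example of the rewrite (assumed shapes and values, not from this dispatch):
//   linalg.fill ins(%cst : f16) outs(%buf : memref<16xf16>)
// becomes roughly:
//   scf.for %i = %c0 to %c16 step %c1 {
//     memref.store %cst, %buf[%i] : memref<16xf16>
//   }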
func.func @pack_dispatch_0_pack_f16() attributes {translation_info = #iree_codegen.translation_info<CPUDataTiling>} {
%c15 = arith.constant 15 : index
%c14 = arith.constant 14 : index
%c13 = arith.constant 13 : index
%c12 = arith.constant 12 : index
%c11 = arith.constant 11 : index
%c10 = arith.constant 10 : index
%c9 = arith.constant 9 : index
%c8 = arith.constant 8 : index
%c7 = arith.constant 7 : index
%c6 = arith.constant 6 : index
%c5 = arith.constant 5 : index
%c4 = arith.constant 4 : index
%c3 = arith.constant 3 : index
%c2 = arith.constant 2 : index
%c16 = arith.constant 16 : index
%c128 = arith.constant 128 : index
%c60 = arith.constant 60 : index
%c1 = arith.constant 1 : index
%c3200 = arith.constant 3200 : index
%c540 = arith.constant 540 : index
%c0 = arith.constant 0 : index
%c32_i64 = arith.constant 32 : i64
%0 = hal.interface.constant.load[0] : i32
%1 = hal.interface.constant.load[1] : i32
%2 = arith.extui %0 : i32 to i64
%3 = arith.extui %1 : i32 to i64
%4 = arith.shli %3, %c32_i64 : i64
%5 = arith.ori %2, %4 : i64
%6 = arith.index_castui %5 : i64 to index
%7 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<?x8640x3200xf16>{%6}
memref.assume_alignment %7, 64 : memref<?x8640x3200xf16>
%8 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : memref<?x540x3200x16x1xf16>{%6}
memref.assume_alignment %8, 64 : memref<?x540x3200x16x1xf16>
%workgroup_id_x = hal.interface.workgroup.id[0] : index
%workgroup_count_x = hal.interface.workgroup.count[0] : index
%workgroup_id_y = hal.interface.workgroup.id[1] : index
%workgroup_count_y = hal.interface.workgroup.count[1] : index
%workgroup_id_z = hal.interface.workgroup.id[2] : index
%workgroup_count_z = hal.interface.workgroup.count[2] : index
%9 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_z]
%10 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_z]
%11 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_id_y]
%12 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_count_y]
%13 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_id_x]
%14 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_count_x]
scf.for %arg0 = %9 to %6 step %10 {
%15 = affine.min affine_map<(d0)[s0] -> (-d0 + s0, 64)>(%arg0)[%6]
scf.for %arg1 = %11 to %c540 step %12 {
%16 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg1)
scf.for %arg2 = %13 to %c3200 step %14 {
%subview = memref.subview %8[%arg0, %arg1, %arg2, 0, 0] [%15, 60, 128, 16, 1] [1, 1, 1, 1, 1] : memref<?x540x3200x16x1xf16> to memref<?x60x128x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>>
%subview_0 = memref.subview %7[%arg0, %16, %arg2] [%15, 960, 128] [1, 1, 1] : memref<?x8640x3200xf16> to memref<?x960x128xf16, strided<[27648000, 3200, 1], offset: ?>>
scf.for %arg3 = %c0 to %15 step %c1 {
scf.for %arg4 = %c0 to %c60 step %c1 {
%17 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg4)
scf.for %arg5 = %c0 to %c128 step %c16 {
%subview_1 = memref.subview %subview_0[%arg3, %17, %arg5] [1, 16, 16] [1, 1, 1] : memref<?x960x128xf16, strided<[27648000, 3200, 1], offset: ?>> to memref<1x16x16xf16, strided<[27648000, 3200, 1], offset: ?>>
%expand_shape = memref.expand_shape %subview_1 [[0], [1, 2], [3, 4]] : memref<1x16x16xf16, strided<[27648000, 3200, 1], offset: ?>> into memref<1x1x16x16x1xf16, strided<[27648000, 51200, 3200, 1, 1], offset: ?>>
%subview_2 = memref.subview %expand_shape[0, 0, 0, 0, 0] [1, 1, 16, 16, 1] [1, 1, 1, 1, 1] : memref<1x1x16x16x1xf16, strided<[27648000, 51200, 3200, 1, 1], offset: ?>> to memref<1x1x16x16xf16, strided<[27648000, 51200, 3200, 1], offset: ?>>
%subview_3 = memref.subview %subview_2[0, 0, 0, 0] [1, 1, 16, 16] [1, 1, 1, 1] : memref<1x1x16x16xf16, strided<[27648000, 51200, 3200, 1], offset: ?>> to memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>
%18 = vector.load %subview_3[%c0, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%19 = vector.load %subview_3[%c1, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%20 = vector.load %subview_3[%c2, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%21 = vector.load %subview_3[%c3, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%22 = vector.load %subview_3[%c4, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%23 = vector.load %subview_3[%c5, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%24 = vector.load %subview_3[%c6, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%25 = vector.load %subview_3[%c7, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%26 = vector.load %subview_3[%c8, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%27 = vector.load %subview_3[%c9, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%28 = vector.load %subview_3[%c10, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%29 = vector.load %subview_3[%c11, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%30 = vector.load %subview_3[%c12, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%31 = vector.load %subview_3[%c13, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%32 = vector.load %subview_3[%c14, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%33 = vector.load %subview_3[%c15, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%34 = vector.shuffle %18, %19 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%35 = vector.shuffle %18, %19 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%36 = vector.shuffle %20, %21 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%37 = vector.shuffle %20, %21 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%38 = vector.shuffle %22, %23 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%39 = vector.shuffle %22, %23 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%40 = vector.shuffle %24, %25 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%41 = vector.shuffle %24, %25 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%42 = vector.shuffle %26, %27 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%43 = vector.shuffle %26, %27 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%44 = vector.shuffle %28, %29 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%45 = vector.shuffle %28, %29 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%46 = vector.shuffle %30, %31 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%47 = vector.shuffle %30, %31 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%48 = vector.shuffle %32, %33 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%49 = vector.shuffle %32, %33 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%50 = vector.shuffle %34, %36 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%51 = vector.shuffle %34, %36 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%52 = vector.shuffle %35, %37 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%53 = vector.shuffle %35, %37 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%54 = vector.shuffle %38, %40 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%55 = vector.shuffle %38, %40 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%56 = vector.shuffle %39, %41 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%57 = vector.shuffle %39, %41 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%58 = vector.shuffle %42, %44 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%59 = vector.shuffle %42, %44 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%60 = vector.shuffle %43, %45 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%61 = vector.shuffle %43, %45 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%62 = vector.shuffle %46, %48 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%63 = vector.shuffle %46, %48 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%64 = vector.shuffle %47, %49 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%65 = vector.shuffle %47, %49 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%66 = vector.shuffle %50, %54 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%67 = vector.shuffle %51, %55 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%68 = vector.shuffle %52, %56 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%69 = vector.shuffle %53, %57 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%70 = vector.shuffle %50, %54 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%71 = vector.shuffle %51, %55 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%72 = vector.shuffle %52, %56 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%73 = vector.shuffle %53, %57 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%74 = vector.shuffle %58, %62 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%75 = vector.shuffle %59, %63 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%76 = vector.shuffle %60, %64 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%77 = vector.shuffle %61, %65 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%78 = vector.shuffle %58, %62 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%79 = vector.shuffle %59, %63 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%80 = vector.shuffle %60, %64 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%81 = vector.shuffle %61, %65 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%82 = vector.shuffle %66, %74 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%83 = vector.shuffle %67, %75 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%84 = vector.shuffle %68, %76 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%85 = vector.shuffle %69, %77 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%86 = vector.shuffle %70, %78 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%87 = vector.shuffle %71, %79 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%88 = vector.shuffle %72, %80 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%89 = vector.shuffle %73, %81 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%90 = vector.shuffle %66, %74 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%91 = vector.shuffle %67, %75 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%92 = vector.shuffle %68, %76 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%93 = vector.shuffle %69, %77 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%94 = vector.shuffle %70, %78 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%95 = vector.shuffle %71, %79 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%96 = vector.shuffle %72, %80 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%97 = vector.shuffle %73, %81 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%subview_4 = memref.subview %subview[0, 0, 0, 0, 0] [%15, 60, 128, 16, 1] [1, 1, 1, 1, 1] : memref<?x60x128x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>> to memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>
vector.store %82, %subview_4[%arg3, %arg4, %arg5, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%98 = affine.apply affine_map<(d0) -> (d0 + 1)>(%arg5)
vector.store %83, %subview_4[%arg3, %arg4, %98, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%99 = affine.apply affine_map<(d0) -> (d0 + 2)>(%arg5)
vector.store %84, %subview_4[%arg3, %arg4, %99, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%100 = affine.apply affine_map<(d0) -> (d0 + 3)>(%arg5)
vector.store %85, %subview_4[%arg3, %arg4, %100, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%101 = affine.apply affine_map<(d0) -> (d0 + 4)>(%arg5)
vector.store %86, %subview_4[%arg3, %arg4, %101, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%102 = affine.apply affine_map<(d0) -> (d0 + 5)>(%arg5)
vector.store %87, %subview_4[%arg3, %arg4, %102, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%103 = affine.apply affine_map<(d0) -> (d0 + 6)>(%arg5)
vector.store %88, %subview_4[%arg3, %arg4, %103, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%104 = affine.apply affine_map<(d0) -> (d0 + 7)>(%arg5)
vector.store %89, %subview_4[%arg3, %arg4, %104, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%105 = affine.apply affine_map<(d0) -> (d0 + 8)>(%arg5)
vector.store %90, %subview_4[%arg3, %arg4, %105, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%106 = affine.apply affine_map<(d0) -> (d0 + 9)>(%arg5)
vector.store %91, %subview_4[%arg3, %arg4, %106, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%107 = affine.apply affine_map<(d0) -> (d0 + 10)>(%arg5)
vector.store %92, %subview_4[%arg3, %arg4, %107, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%108 = affine.apply affine_map<(d0) -> (d0 + 11)>(%arg5)
vector.store %93, %subview_4[%arg3, %arg4, %108, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%109 = affine.apply affine_map<(d0) -> (d0 + 12)>(%arg5)
vector.store %94, %subview_4[%arg3, %arg4, %109, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%110 = affine.apply affine_map<(d0) -> (d0 + 13)>(%arg5)
vector.store %95, %subview_4[%arg3, %arg4, %110, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%111 = affine.apply affine_map<(d0) -> (d0 + 14)>(%arg5)
vector.store %96, %subview_4[%arg3, %arg4, %111, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%112 = affine.apply affine_map<(d0) -> (d0 + 15)>(%arg5)
vector.store %97, %subview_4[%arg3, %arg4, %112, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
}
}
}
}
}
}
return
}
// -----// IR Dump After ConvertBf16ArithToF32 (iree-convert-bf16-arith-to-f32) //----- //
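// Note: iree-convert-bf16-arith-to-f32 appears (from its name) to widen bf16 arithmetic to f32:
// extend the operands, compute in f32, then truncate the result back to bf16. This dispatch only
// moves f16 data (vector.load / vector.shuffle / vector.store) and has no bf16 arithmetic, so the
// visible portion of the dump below matches the previous one. A generic sketch of the kind of
// rewrite such a pass performs (assumed values, not from this dispatch):
//   %s = arith.addf %a, %b : bf16
// into:
//   %ae = arith.extf %a : bf16 to f32
//   %be = arith.extf %b : bf16 to f32
//   %sf = arith.addf %ae, %be : f32
//   %s2 = arith.truncf %sf : f32 to bf16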
func.func @pack_dispatch_0_pack_f16() attributes {translation_info = #iree_codegen.translation_info<CPUDataTiling>} {
%c15 = arith.constant 15 : index
%c14 = arith.constant 14 : index
%c13 = arith.constant 13 : index
%c12 = arith.constant 12 : index
%c11 = arith.constant 11 : index
%c10 = arith.constant 10 : index
%c9 = arith.constant 9 : index
%c8 = arith.constant 8 : index
%c7 = arith.constant 7 : index
%c6 = arith.constant 6 : index
%c5 = arith.constant 5 : index
%c4 = arith.constant 4 : index
%c3 = arith.constant 3 : index
%c2 = arith.constant 2 : index
%c16 = arith.constant 16 : index
%c128 = arith.constant 128 : index
%c60 = arith.constant 60 : index
%c1 = arith.constant 1 : index
%c3200 = arith.constant 3200 : index
%c540 = arith.constant 540 : index
%c0 = arith.constant 0 : index
%c32_i64 = arith.constant 32 : i64
%0 = hal.interface.constant.load[0] : i32
%1 = hal.interface.constant.load[1] : i32
%2 = arith.extui %0 : i32 to i64
%3 = arith.extui %1 : i32 to i64
%4 = arith.shli %3, %c32_i64 : i64
%5 = arith.ori %2, %4 : i64
%6 = arith.index_castui %5 : i64 to index
%7 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<?x8640x3200xf16>{%6}
memref.assume_alignment %7, 64 : memref<?x8640x3200xf16>
%8 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : memref<?x540x3200x16x1xf16>{%6}
memref.assume_alignment %8, 64 : memref<?x540x3200x16x1xf16>
%workgroup_id_x = hal.interface.workgroup.id[0] : index
%workgroup_count_x = hal.interface.workgroup.count[0] : index
%workgroup_id_y = hal.interface.workgroup.id[1] : index
%workgroup_count_y = hal.interface.workgroup.count[1] : index
%workgroup_id_z = hal.interface.workgroup.id[2] : index
%workgroup_count_z = hal.interface.workgroup.count[2] : index
%9 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_z]
%10 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_z]
%11 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_id_y]
%12 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_count_y]
%13 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_id_x]
%14 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_count_x]
scf.for %arg0 = %9 to %6 step %10 {
%15 = affine.min affine_map<(d0)[s0] -> (-d0 + s0, 64)>(%arg0)[%6]
scf.for %arg1 = %11 to %c540 step %12 {
%16 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg1)
scf.for %arg2 = %13 to %c3200 step %14 {
%subview = memref.subview %8[%arg0, %arg1, %arg2, 0, 0] [%15, 60, 128, 16, 1] [1, 1, 1, 1, 1] : memref<?x540x3200x16x1xf16> to memref<?x60x128x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>>
%subview_0 = memref.subview %7[%arg0, %16, %arg2] [%15, 960, 128] [1, 1, 1] : memref<?x8640x3200xf16> to memref<?x960x128xf16, strided<[27648000, 3200, 1], offset: ?>>
scf.for %arg3 = %c0 to %15 step %c1 {
scf.for %arg4 = %c0 to %c60 step %c1 {
%17 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg4)
scf.for %arg5 = %c0 to %c128 step %c16 {
%subview_1 = memref.subview %subview_0[%arg3, %17, %arg5] [1, 16, 16] [1, 1, 1] : memref<?x960x128xf16, strided<[27648000, 3200, 1], offset: ?>> to memref<1x16x16xf16, strided<[27648000, 3200, 1], offset: ?>>
%expand_shape = memref.expand_shape %subview_1 [[0], [1, 2], [3, 4]] : memref<1x16x16xf16, strided<[27648000, 3200, 1], offset: ?>> into memref<1x1x16x16x1xf16, strided<[27648000, 51200, 3200, 1, 1], offset: ?>>
%subview_2 = memref.subview %expand_shape[0, 0, 0, 0, 0] [1, 1, 16, 16, 1] [1, 1, 1, 1, 1] : memref<1x1x16x16x1xf16, strided<[27648000, 51200, 3200, 1, 1], offset: ?>> to memref<1x1x16x16xf16, strided<[27648000, 51200, 3200, 1], offset: ?>>
%subview_3 = memref.subview %subview_2[0, 0, 0, 0] [1, 1, 16, 16] [1, 1, 1, 1] : memref<1x1x16x16xf16, strided<[27648000, 51200, 3200, 1], offset: ?>> to memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>
%18 = vector.load %subview_3[%c0, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%19 = vector.load %subview_3[%c1, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%20 = vector.load %subview_3[%c2, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%21 = vector.load %subview_3[%c3, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%22 = vector.load %subview_3[%c4, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%23 = vector.load %subview_3[%c5, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%24 = vector.load %subview_3[%c6, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%25 = vector.load %subview_3[%c7, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%26 = vector.load %subview_3[%c8, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%27 = vector.load %subview_3[%c9, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%28 = vector.load %subview_3[%c10, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%29 = vector.load %subview_3[%c11, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%30 = vector.load %subview_3[%c12, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%31 = vector.load %subview_3[%c13, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%32 = vector.load %subview_3[%c14, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%33 = vector.load %subview_3[%c15, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%34 = vector.shuffle %18, %19 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%35 = vector.shuffle %18, %19 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%36 = vector.shuffle %20, %21 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%37 = vector.shuffle %20, %21 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%38 = vector.shuffle %22, %23 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%39 = vector.shuffle %22, %23 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%40 = vector.shuffle %24, %25 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%41 = vector.shuffle %24, %25 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%42 = vector.shuffle %26, %27 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%43 = vector.shuffle %26, %27 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%44 = vector.shuffle %28, %29 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%45 = vector.shuffle %28, %29 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%46 = vector.shuffle %30, %31 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%47 = vector.shuffle %30, %31 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%48 = vector.shuffle %32, %33 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>
%49 = vector.shuffle %32, %33 [2, 18, 3, 19, 6, 22, 7, 23, 10, 26, 11, 27, 14, 30, 15, 31] : vector<16xf16>, vector<16xf16>
%50 = vector.shuffle %34, %36 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%51 = vector.shuffle %34, %36 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%52 = vector.shuffle %35, %37 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%53 = vector.shuffle %35, %37 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%54 = vector.shuffle %38, %40 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%55 = vector.shuffle %38, %40 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%56 = vector.shuffle %39, %41 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%57 = vector.shuffle %39, %41 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%58 = vector.shuffle %42, %44 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%59 = vector.shuffle %42, %44 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%60 = vector.shuffle %43, %45 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%61 = vector.shuffle %43, %45 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%62 = vector.shuffle %46, %48 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%63 = vector.shuffle %46, %48 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%64 = vector.shuffle %47, %49 [0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29] : vector<16xf16>, vector<16xf16>
%65 = vector.shuffle %47, %49 [2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31] : vector<16xf16>, vector<16xf16>
%66 = vector.shuffle %50, %54 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%67 = vector.shuffle %51, %55 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%68 = vector.shuffle %52, %56 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%69 = vector.shuffle %53, %57 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%70 = vector.shuffle %50, %54 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%71 = vector.shuffle %51, %55 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%72 = vector.shuffle %52, %56 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%73 = vector.shuffle %53, %57 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%74 = vector.shuffle %58, %62 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%75 = vector.shuffle %59, %63 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%76 = vector.shuffle %60, %64 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%77 = vector.shuffle %61, %65 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%78 = vector.shuffle %58, %62 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%79 = vector.shuffle %59, %63 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%80 = vector.shuffle %60, %64 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%81 = vector.shuffle %61, %65 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%82 = vector.shuffle %66, %74 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%83 = vector.shuffle %67, %75 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%84 = vector.shuffle %68, %76 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%85 = vector.shuffle %69, %77 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%86 = vector.shuffle %70, %78 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%87 = vector.shuffle %71, %79 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%88 = vector.shuffle %72, %80 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%89 = vector.shuffle %73, %81 [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27] : vector<16xf16>, vector<16xf16>
%90 = vector.shuffle %66, %74 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%91 = vector.shuffle %67, %75 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%92 = vector.shuffle %68, %76 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%93 = vector.shuffle %69, %77 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%94 = vector.shuffle %70, %78 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%95 = vector.shuffle %71, %79 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%96 = vector.shuffle %72, %80 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%97 = vector.shuffle %73, %81 [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31] : vector<16xf16>, vector<16xf16>
%subview_4 = memref.subview %subview[0, 0, 0, 0, 0] [%15, 60, 128, 16, 1] [1, 1, 1, 1, 1] : memref<?x60x128x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>> to memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>
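// %subview_4 drops the trailing unit dim of the packed destination tile; the sixteen stores below write the transposed vectors %82..%97 contiguously along the inner 16-element dim, one per column offset %arg5 + 0 .. %arg5 + 15.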
vector.store %82, %subview_4[%arg3, %arg4, %arg5, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%98 = affine.apply affine_map<(d0) -> (d0 + 1)>(%arg5)
vector.store %83, %subview_4[%arg3, %arg4, %98, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%99 = affine.apply affine_map<(d0) -> (d0 + 2)>(%arg5)
vector.store %84, %subview_4[%arg3, %arg4, %99, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%100 = affine.apply affine_map<(d0) -> (d0 + 3)>(%arg5)
vector.store %85, %subview_4[%arg3, %arg4, %100, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%101 = affine.apply affine_map<(d0) -> (d0 + 4)>(%arg5)
vector.store %86, %subview_4[%arg3, %arg4, %101, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%102 = affine.apply affine_map<(d0) -> (d0 + 5)>(%arg5)
vector.store %87, %subview_4[%arg3, %arg4, %102, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%103 = affine.apply affine_map<(d0) -> (d0 + 6)>(%arg5)
vector.store %88, %subview_4[%arg3, %arg4, %103, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%104 = affine.apply affine_map<(d0) -> (d0 + 7)>(%arg5)
vector.store %89, %subview_4[%arg3, %arg4, %104, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%105 = affine.apply affine_map<(d0) -> (d0 + 8)>(%arg5)
vector.store %90, %subview_4[%arg3, %arg4, %105, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%106 = affine.apply affine_map<(d0) -> (d0 + 9)>(%arg5)
vector.store %91, %subview_4[%arg3, %arg4, %106, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%107 = affine.apply affine_map<(d0) -> (d0 + 10)>(%arg5)
vector.store %92, %subview_4[%arg3, %arg4, %107, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%108 = affine.apply affine_map<(d0) -> (d0 + 11)>(%arg5)
vector.store %93, %subview_4[%arg3, %arg4, %108, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%109 = affine.apply affine_map<(d0) -> (d0 + 12)>(%arg5)
vector.store %94, %subview_4[%arg3, %arg4, %109, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%110 = affine.apply affine_map<(d0) -> (d0 + 13)>(%arg5)
vector.store %95, %subview_4[%arg3, %arg4, %110, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%111 = affine.apply affine_map<(d0) -> (d0 + 14)>(%arg5)
vector.store %96, %subview_4[%arg3, %arg4, %111, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
%112 = affine.apply affine_map<(d0) -> (d0 + 15)>(%arg5)
vector.store %97, %subview_4[%arg3, %arg4, %112, %c0] : memref<?x60x128x16xf16, strided<[27648000, 51200, 16, 1], offset: ?>>, vector<16xf16>
}
}
}
}
}
}
return
}
// -----// IR Dump After ConvertBf16ToUInt16Buffers (iree-convert-bf16-to-uint16-buffers) //----- //
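// Note: this dispatch only uses f16 buffers, so the bf16-to-uint16 buffer conversion appears to leave the function body unchanged from the previous dump.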
func.func @pack_dispatch_0_pack_f16() attributes {translation_info = #iree_codegen.translation_info<CPUDataTiling>} {
%c15 = arith.constant 15 : index
%c14 = arith.constant 14 : index
%c13 = arith.constant 13 : index
%c12 = arith.constant 12 : index
%c11 = arith.constant 11 : index
%c10 = arith.constant 10 : index
%c9 = arith.constant 9 : index
%c8 = arith.constant 8 : index
%c7 = arith.constant 7 : index
%c6 = arith.constant 6 : index
%c5 = arith.constant 5 : index
%c4 = arith.constant 4 : index
%c3 = arith.constant 3 : index
%c2 = arith.constant 2 : index
%c16 = arith.constant 16 : index
%c128 = arith.constant 128 : index
%c60 = arith.constant 60 : index
%c1 = arith.constant 1 : index
%c3200 = arith.constant 3200 : index
%c540 = arith.constant 540 : index
%c0 = arith.constant 0 : index
%c32_i64 = arith.constant 32 : i64
%0 = hal.interface.constant.load[0] : i32
%1 = hal.interface.constant.load[1] : i32
%2 = arith.extui %0 : i32 to i64
%3 = arith.extui %1 : i32 to i64
%4 = arith.shli %3, %c32_i64 : i64
%5 = arith.ori %2, %4 : i64
%6 = arith.index_castui %5 : i64 to index
%7 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<?x8640x3200xf16>{%6}
memref.assume_alignment %7, 64 : memref<?x8640x3200xf16>
%8 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : memref<?x540x3200x16x1xf16>{%6}
memref.assume_alignment %8, 64 : memref<?x540x3200x16x1xf16>
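// The dynamic batch extent arrives as two 32-bit push constants: %0 is the low word and %1 the high word; they are recombined into an i64 and cast to index (%6). Binding 0 is the read-only source memref<?x8640x3200xf16> and binding 1 the packed destination memref<?x540x3200x16x1xf16>, both sized by %6.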
%workgroup_id_x = hal.interface.workgroup.id[0] : index
%workgroup_count_x = hal.interface.workgroup.count[0] : index
%workgroup_id_y = hal.interface.workgroup.id[1] : index
%workgroup_count_y = hal.interface.workgroup.count[1] : index
%workgroup_id_z = hal.interface.workgroup.id[2] : index
%workgroup_count_z = hal.interface.workgroup.count[2] : index
%9 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_z]
%10 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_z]
%11 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_id_y]
%12 = affine.apply affine_map<()[s0] -> (s0 * 60)>()[%workgroup_count_y]
%13 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_id_x]
%14 = affine.apply affine_map<()[s0] -> (s0 * 128)>()[%workgroup_count_x]
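// Workgroup distribution: z tiles the dynamic batch dim in steps of 64, y tiles the 540 packed outer rows in steps of 60, and x tiles the 3200 columns in steps of 128.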
scf.for %arg0 = %9 to %6 step %10 {
%15 = affine.min affine_map<(d0)[s0] -> (-d0 + s0, 64)>(%arg0)[%6]
scf.for %arg1 = %11 to %c540 step %12 {
%16 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg1)
scf.for %arg2 = %13 to %c3200 step %14 {
%subview = memref.subview %8[%arg0, %arg1, %arg2, 0, 0] [%15, 60, 128, 16, 1] [1, 1, 1, 1, 1] : memref<?x540x3200x16x1xf16> to memref<?x60x128x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>>
%subview_0 = memref.subview %7[%arg0, %16, %arg2] [%15, 960, 128] [1, 1, 1] : memref<?x8640x3200xf16> to memref<?x960x128xf16, strided<[27648000, 3200, 1], offset: ?>>
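// Per-workgroup tile: the destination subview covers %15 x 60 x 128 x 16 x 1 elements of the packed output, and the matching source subview covers %15 x 960 x 128 (60 packed rows x inner tile of 16 = 960 source rows); %16 maps the packed row index %arg1 back to its source row offset.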
scf.for %arg3 = %c0 to %15 step %c1 {
scf.for %arg4 = %c0 to %c60 step %c1 {
%17 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg4)
scf.for %arg5 = %c0 to %c128 step %c16 {
%subview_1 = memref.subview %subview_0[%arg3, %17, %arg5] [1, 16, 16] [1, 1, 1] : memref<?x960x128xf16, strided<[27648000, 3200, 1], offset: ?>> to memref<1x16x16xf16, strided<[27648000, 3200, 1], offset: ?>>
%expand_shape = memref.expand_shape %subview_1 [[0], [1, 2], [3, 4]] : memref<1x16x16xf16, strided<[27648000, 3200, 1], offset: ?>> into memref<1x1x16x16x1xf16, strided<[27648000, 51200, 3200, 1, 1], offset: ?>>
%subview_2 = memref.subview %expand_shape[0, 0, 0, 0, 0] [1, 1, 16, 16, 1] [1, 1, 1, 1, 1] : memref<1x1x16x16x1xf16, strided<[27648000, 51200, 3200, 1, 1], offset: ?>> to memref<1x1x16x16xf16, strided<[27648000, 51200, 3200, 1], offset: ?>>
%subview_3 = memref.subview %subview_2[0, 0, 0, 0] [1, 1, 16, 16] [1, 1, 1, 1] : memref<1x1x16x16xf16, strided<[27648000, 51200, 3200, 1], offset: ?>> to memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>
%18 = vector.load %subview_3[%c0, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%19 = vector.load %subview_3[%c1, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%20 = vector.load %subview_3[%c2, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%21 = vector.load %subview_3[%c3, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%22 = vector.load %subview_3[%c4, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%23 = vector.load %subview_3[%c5, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%24 = vector.load %subview_3[%c6, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%25 = vector.load %subview_3[%c7, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%26 = vector.load %subview_3[%c8, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%27 = vector.load %subview_3[%c9, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%28 = vector.load %subview_3[%c10, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%29 = vector.load %subview_3[%c11, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%30 = vector.load %subview_3[%c12, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%31 = vector.load %subview_3[%c13, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%32 = vector.load %subview_3[%c14, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%33 = vector.load %subview_3[%c15, %c0] : memref<16x16xf16, affine_map<(d0, d1)[s0] -> (d0 * 3200 + d1 + s0)>>, vector<16xf16>
%34 = vector.shuffle %18, %19 [0, 16, 1, 17, 4, 20, 5, 21, 8, 24, 9, 25, 12, 28, 13, 29] : vector<16xf16>, vector<16xf16>