// Gist pashu123/8be282c34f8cb2c6c92c4ddfc8c8a160, created April 23, 2024 08:54.
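// NOTE: per-pass IR dumps from iree-compile for @broadcast_pack_kernel, targeting
// llvm-cpu (embedded-elf-x86_64, cpu = znver4). The log is truncated at the end.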
// -----// IR Dump After AssignTargetDevicesPass (iree-hal-assign-target-devices) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
  func.func @broadcast_pack_kernel(%arg0: tensor<?x?x3200xf32>, %arg1: tensor<8640x3200xf16>) -> tensor<?x540x3200x16x1xf16> {
    %c0 = arith.constant 0 : index
    %dim = tensor.dim %arg0, %c0 : tensor<?x?x3200xf32>
    %0 = tensor.empty(%dim) : tensor<?x540x3200x16x1xf16>
    %1 = tensor.empty(%dim) : tensor<?x8640x3200xf16>
    %2 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%arg1 : tensor<8640x3200xf16>) outs(%1 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %pack = tensor.pack %2 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %0 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
    return %pack : tensor<?x540x3200x16x1xf16>
  }
}
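// NOTE: the kernel broadcasts the static 8640x3200xf16 weight across a dynamic leading
// dimension taken from %arg0, then tensor.pack tiles dims [1, 2] by inner_tiles [16, 1]:
// 8640 / 16 = 540 outer tiles, which yields the ?x540x3200x16x1xf16 result type.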
// -----// IR Dump After AutoInputConversionPipeline (iree-auto-input-conversion) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
  func.func @broadcast_pack_kernel(%arg0: tensor<?x?x3200xf32>, %arg1: tensor<8640x3200xf16>) -> tensor<?x540x3200x16x1xf16> {
    %c0 = arith.constant 0 : index
    %dim = tensor.dim %arg0, %c0 : tensor<?x?x3200xf32>
    %0 = tensor.empty(%dim) : tensor<?x540x3200x16x1xf16>
    %1 = tensor.empty(%dim) : tensor<?x8640x3200xf16>
    %2 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%arg1 : tensor<8640x3200xf16>) outs(%1 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %pack = tensor.pack %2 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %0 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
    return %pack : tensor<?x540x3200x16x1xf16>
  }
}
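// NOTE: AutoInputConversionPipeline makes no changes here; the module appears to already
// be in IREE's linalg-on-tensors input form.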
// -----// IR Dump After IREEImportPublic (iree-import-public) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
  util.func public @broadcast_pack_kernel(%arg0: tensor<?x?x3200xf32>, %arg1: tensor<8640x3200xf16>) -> tensor<?x540x3200x16x1xf16> {
    %c0 = arith.constant 0 : index
    %dim = tensor.dim %arg0, %c0 : tensor<?x?x3200xf32>
    %0 = tensor.empty(%dim) : tensor<?x540x3200x16x1xf16>
    %1 = tensor.empty(%dim) : tensor<?x8640x3200xf16>
    %2 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%arg1 : tensor<8640x3200xf16>) outs(%1 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %pack = tensor.pack %2 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %0 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
    util.return %pack : tensor<?x540x3200x16x1xf16>
  }
}
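// NOTE: the only change from IREEImportPublic is the ABI rewrite:
// func.func/return become util.func/util.return.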
// -----// IR Dump After ImportMLProgram (iree-import-ml-program) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
  util.func public @broadcast_pack_kernel(%arg0: tensor<?x?x3200xf32>, %arg1: tensor<8640x3200xf16>) -> tensor<?x540x3200x16x1xf16> {
    %c0 = arith.constant 0 : index
    %dim = tensor.dim %arg0, %c0 : tensor<?x?x3200xf32>
    %0 = tensor.empty(%dim) : tensor<?x540x3200x16x1xf16>
    %1 = tensor.empty(%dim) : tensor<?x8640x3200xf16>
    %2 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%arg1 : tensor<8640x3200xf16>) outs(%1 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %pack = tensor.pack %2 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %0 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
    util.return %pack : tensor<?x540x3200x16x1xf16>
  }
}
// -----// IR Dump After SanitizeModuleNames (iree-sanitize-module-names) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
  util.func public @broadcast_pack_kernel(%arg0: tensor<?x?x3200xf32>, %arg1: tensor<8640x3200xf16>) -> tensor<?x540x3200x16x1xf16> {
    %c0 = arith.constant 0 : index
    %dim = tensor.dim %arg0, %c0 : tensor<?x?x3200xf32>
    %0 = tensor.empty(%dim) : tensor<?x540x3200x16x1xf16>
    %1 = tensor.empty(%dim) : tensor<?x8640x3200xf16>
    %2 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%arg1 : tensor<8640x3200xf16>) outs(%1 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %pack = tensor.pack %2 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %0 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
    util.return %pack : tensor<?x540x3200x16x1xf16>
  }
}
// -----// IR Dump After ConvertMeshToFlow (iree-convert-mesh-to-flow) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
  util.func public @broadcast_pack_kernel(%arg0: tensor<?x?x3200xf32>, %arg1: tensor<8640x3200xf16>) -> tensor<?x540x3200x16x1xf16> {
    %c0 = arith.constant 0 : index
    %dim = tensor.dim %arg0, %c0 : tensor<?x?x3200xf32>
    %0 = tensor.empty(%dim) : tensor<?x540x3200x16x1xf16>
    %1 = tensor.empty(%dim) : tensor<?x8640x3200xf16>
    %2 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%arg1 : tensor<8640x3200xf16>) outs(%1 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %pack = tensor.pack %2 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %0 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
    util.return %pack : tensor<?x540x3200x16x1xf16>
  }
}
// -----// IR Dump After mlir::iree_compiler::IREE::ABI::ConvertStreamableOpsPass (iree-abi-convert-streamable-ops) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
  util.func public @broadcast_pack_kernel(%arg0: tensor<?x?x3200xf32>, %arg1: tensor<8640x3200xf16>) -> tensor<?x540x3200x16x1xf16> {
    %c0 = arith.constant 0 : index
    %dim = tensor.dim %arg0, %c0 : tensor<?x?x3200xf32>
    %0 = tensor.empty(%dim) : tensor<?x540x3200x16x1xf16>
    %1 = tensor.empty(%dim) : tensor<?x8640x3200xf16>
    %2 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%arg1 : tensor<8640x3200xf16>) outs(%1 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %pack = tensor.pack %2 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %0 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
    util.return %pack : tensor<?x540x3200x16x1xf16>
  }
}
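// NOTE: ImportMLProgram, SanitizeModuleNames, ConvertMeshToFlow, and ConvertStreamableOpsPass
// all leave the module unchanged; it contains no ml_program, mesh, or streamable ops to convert.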
// -----// IR Dump After mlir::iree_compiler::IREE::ABI::WrapEntryPointsPass (iree-abi-wrap-entry-points) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
  util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
    %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
    %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
    %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
    %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
    %4 = util.call @_broadcast_pack_kernel(%2, %3) : (tensor<?x?x3200xf32>, tensor<8640x3200xf16>) -> tensor<?x540x3200x16x1xf16>
    %c0 = arith.constant 0 : index
    %dim = tensor.dim %4, %c0 : tensor<?x540x3200x16x1xf16>
    %5 = hal.tensor.export %4 "output0" : tensor<?x540x3200x16x1xf16>{%dim} -> !hal.buffer_view
    util.return %5 : !hal.buffer_view
  }
  util.func private @_broadcast_pack_kernel(%arg0: tensor<?x?x3200xf32>, %arg1: tensor<8640x3200xf16>) -> tensor<?x540x3200x16x1xf16> {
    %c0 = arith.constant 0 : index
    %dim = tensor.dim %arg0, %c0 : tensor<?x?x3200xf32>
    %0 = tensor.empty(%dim) : tensor<?x540x3200x16x1xf16>
    %1 = tensor.empty(%dim) : tensor<?x8640x3200xf16>
    %2 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%arg1 : tensor<8640x3200xf16>) outs(%1 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %pack = tensor.pack %2 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %0 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
    util.return %pack : tensor<?x540x3200x16x1xf16>
  }
}
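// NOTE: WrapEntryPointsPass splits the entry point: the public wrapper imports/exports
// !hal.buffer_view arguments (querying dynamic dims via hal.buffer_view.dim) and calls the
// private @_broadcast_pack_kernel, which holds the original tensor-level body.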
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func private @_broadcast_pack_kernel(%arg0: tensor<?x?x3200xf32>, %arg1: tensor<8640x3200xf16>) -> tensor<?x540x3200x16x1xf16> {
  %c0 = arith.constant 0 : index
  %dim = tensor.dim %arg0, %c0 : tensor<?x?x3200xf32>
  %0 = tensor.empty(%dim) : tensor<?x540x3200x16x1xf16>
  %1 = tensor.empty(%dim) : tensor<?x8640x3200xf16>
  %2 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%arg1 : tensor<8640x3200xf16>) outs(%1 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %pack = tensor.pack %2 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %0 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
  util.return %pack : tensor<?x540x3200x16x1xf16>
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
  %c0 = arith.constant 0 : index
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
  %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
  %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %4 = util.call @_broadcast_pack_kernel(%2, %3) : (tensor<?x?x3200xf32>, tensor<8640x3200xf16>) -> tensor<?x540x3200x16x1xf16>
  %dim = tensor.dim %4, %c0 : tensor<?x540x3200x16x1xf16>
  %5 = hal.tensor.export %4 "output0" : tensor<?x540x3200x16x1xf16>{%dim} -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
  %3 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%1 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %pack = tensor.pack %4 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
  %5 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
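// NOTE: by this last canonicalization the callee body appears inlined into the public
// function: the import of input0 and the dim[1] query are gone (only dim[0] of %arg0 is
// still needed), and the tensor.dim on the pack result has folded to %0.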
// -----// IR Dump After Inliner (inline) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
  util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
    %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
    %2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
    %3 = tensor.empty(%0) : tensor<?x8640x3200xf16>
    %4 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%1 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %pack = tensor.pack %4 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
    %5 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
    util.return %5 : !hal.buffer_view
  }
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
  %3 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%1 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %pack = tensor.pack %4 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
  %5 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
  %3 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%1 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %pack = tensor.pack %4 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
  %5 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump After SymbolDCE (symbol-dce) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
  util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
    %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
    %2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
    %3 = tensor.empty(%0) : tensor<?x8640x3200xf16>
    %4 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%1 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %pack = tensor.pack %4 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
    %5 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
    util.return %5 : !hal.buffer_view
  }
}
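// NOTE: the private @_broadcast_pack_kernel no longer appears after inlining; the SymbolDCE
// dump confirms only the public entry point remains in the module.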
// -----// IR Dump After DemoteF64ToF32 (iree-util-demote-f64-to-f32) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
  util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
    %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
    %2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
    %3 = tensor.empty(%0) : tensor<?x8640x3200xf16>
    %4 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%1 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %pack = tensor.pack %4 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
    %5 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
    util.return %5 : !hal.buffer_view
  }
}
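// NOTE: DemoteF64ToF32 is a no-op here; the module contains only f32, f16, and index types.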
// -----// IR Dump After RemoveZeroExtentTensors (iree-global-opt-remove-zero-extent-tensors) //----- //
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
  %3 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%1 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %pack = tensor.pack %4 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
  %5 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump After DetachElementwiseFromNamedOps (iree-global-opt-detach-elementwise-from-named-ops) //----- //
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
  %3 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%1 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %pack = tensor.pack %4 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
  %5 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump After LinalgNamedOpConversionPass (linalg-named-op-conversion) //----- //
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
  %3 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%1 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %pack = tensor.pack %4 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
  %5 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump After Convert1X1FilterConv2DToMatmul (iree-global-opt-convert-1x1-filter-conv2d-to-matmul) //----- //
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
  %3 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%1 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %pack = tensor.pack %4 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
  %5 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump After EraseUnusedLinalgOperands (iree-global-opt-erase-unused-linalg-operands) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
  util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
    %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
    %2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
    %3 = tensor.empty(%0) : tensor<?x8640x3200xf16>
    %4 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%1 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %pack = tensor.pack %4 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
    %5 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
    util.return %5 : !hal.buffer_view
  }
}
// -----// IR Dump After ExpandTensorShapes (iree-global-opt-expand-tensor-shapes) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
  util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
    %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
    %2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
    %3 = tensor.empty(%0) : tensor<?x8640x3200xf16>
    %4 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%1 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %pack = tensor.pack %4 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
    %5 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
    util.return %5 : !hal.buffer_view
  }
}
// -----// IR Dump After ConvertElementwiseToLinalgPass (convert-elementwise-to-linalg) //----- //
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
  %3 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%1 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %pack = tensor.pack %4 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
  %5 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump After RaiseSpecialOps (iree-global-opt-raise-special-ops) //----- //
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
  %3 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%1 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %pack = tensor.pack %4 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
  %5 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump After DecomposeConcat (iree-global-opt-decompose-concat) //----- //
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
  %3 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%1 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %pack = tensor.pack %4 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
  %5 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump After GeneralizeLinalgNamedOps (iree-global-opt-generalize-linalg-named-ops) //----- //
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
  %3 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%1 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %pack = tensor.pack %4 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
  %5 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump After FoldUnitExtentDimsPass (iree-flow-fold-unit-extent-dims) //----- //
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
  %3 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%1 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %pack = tensor.pack %4 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
  %5 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump After FuseDequantizationMatmul (iree-global-opt-fuse-dequantization-matmul) //----- //
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
  %3 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%1 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %pack = tensor.pack %4 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
  %5 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
  %3 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%1 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %pack = tensor.pack %4 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
  %5 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
  %3 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%1 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %pack = tensor.pack %4 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
  %5 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
  %3 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%1 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %pack = tensor.pack %4 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
  %5 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
  %3 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%1 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %pack = tensor.pack %4 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
  %5 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump After SetEncoding (iree-global-opt-set-encoding) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16> | |
%3 = tensor.empty(%0) : tensor<?x8640x3200xf16> | |
%4 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%1 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %4 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
%5 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
} | |
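// For reference: the tensor.pack above needs no padding_value because the
// tiled sizes divide evenly: 8640 = 540 * 16 and 3200 = 3200 * 1. A
// standalone NumPy spot-check of the index mapping (hypothetical names):
//
//   import numpy as np
//
//   rng = np.random.default_rng(0)
//   src = rng.standard_normal((2, 8640, 3200), dtype=np.float32)  # stands in for %4
//   packed = src.reshape(2, 540, 16, 3200, 1).transpose(0, 1, 3, 2, 4)
//   b, i, j = 1, 1234, 777  # arbitrary coordinates
//   # element (b, i, j) lands at (b, i // 16, j, i % 16, 0) in the packed tensor
//   assert packed[b, i // 16, j, i % 16, 0] == src[b, i, j]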
// -----// IR Dump After CPUMaterializeUpperBoundTileSize (iree-codegen-cpu-materialize-upper-bound-tile-size) //----- // | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16> | |
%3 = tensor.empty(%0) : tensor<?x8640x3200xf16> | |
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%1 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %4 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
%5 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
// -----// IR Dump After CPUMaterializeEncoding (iree-codegen-cpu-materialize-encoding) //----- // | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16> | |
%3 = tensor.empty(%0) : tensor<?x8640x3200xf16> | |
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%1 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %4 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
%5 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
// -----// IR Dump After MaterializeHomogeneousEncodings (iree-global-opt-materialize-homogeneous-encodings) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16> | |
%3 = tensor.empty(%0) : tensor<?x8640x3200xf16> | |
%4 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%1 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %4 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
%5 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16> | |
%3 = tensor.empty(%0) : tensor<?x8640x3200xf16> | |
%4 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%1 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %4 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
%5 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16> | |
%3 = tensor.empty(%0) : tensor<?x8640x3200xf16> | |
%4 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%1 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %4 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
%5 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After SimplifyPackUnpack (iree-global-opt-simplify-pack-unpack) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16> | |
%3 = tensor.empty(%0) : tensor<?x8640x3200xf16> | |
%4 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%1 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %4 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
%5 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After DataLayoutPropagation (iree-global-opt-data-layout-propagation) //----- // | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16> | |
%3 = tensor.empty(%0) : tensor<?x8640x3200xf16> | |
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%1 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %4 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
%5 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
// -----// IR Dump After GeneralizeLinalgNamedOps (iree-global-opt-generalize-linalg-named-ops) //----- // | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16> | |
%3 = tensor.empty(%0) : tensor<?x8640x3200xf16> | |
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%1 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %4 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
%5 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
// -----// IR Dump After GlobalLoopInvariantCodeMotion (iree-global-opt-loop-invariant-code-motion) //----- // | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16> | |
%3 = tensor.empty(%0) : tensor<?x8640x3200xf16> | |
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%1 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %4 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
%5 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16> | |
%3 = tensor.empty(%0) : tensor<?x8640x3200xf16> | |
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%1 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %4 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
%5 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16> | |
%3 = tensor.empty(%0) : tensor<?x8640x3200xf16> | |
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%1 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %4 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
%5 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- // | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16> | |
%3 = tensor.empty(%0) : tensor<?x8640x3200xf16> | |
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%1 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %4 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
%5 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16> | |
%3 = tensor.empty(%0) : tensor<?x8640x3200xf16> | |
%4 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%1 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %4 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
%5 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16> | |
%3 = tensor.empty(%0) : tensor<?x8640x3200xf16> | |
%4 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%1 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %4 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
%5 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After IPO (iree-util-ipo) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16> | |
%3 = tensor.empty(%0) : tensor<?x8640x3200xf16> | |
%4 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%1 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %4 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
%5 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16> | |
%3 = tensor.empty(%0) : tensor<?x8640x3200xf16> | |
%4 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%1 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %4 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
%5 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16> | |
%3 = tensor.empty(%0) : tensor<?x8640x3200xf16> | |
%4 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%1 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %4 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
%5 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After HoistIntoGlobals (iree-util-hoist-into-globals) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16> | |
%3 = tensor.empty(%0) : tensor<?x8640x3200xf16> | |
%4 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%1 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %4 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
%5 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After JitGlobals (iree-consteval-jit-globals) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16> | |
%3 = tensor.empty(%0) : tensor<?x8640x3200xf16> | |
%4 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%1 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %4 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
%5 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16> | |
%3 = tensor.empty(%0) : tensor<?x8640x3200xf16> | |
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%1 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %4 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
%5 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16> | |
%3 = tensor.empty(%0) : tensor<?x8640x3200xf16> | |
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%1 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %4 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
%5 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
// -----// IR Dump After RaiseSpecialOps (iree-global-opt-raise-special-ops) //----- // | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16> | |
%3 = tensor.empty(%0) : tensor<?x8640x3200xf16> | |
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%1 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %4 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
%5 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
// -----// IR Dump After VerifyInputLegalityPass (iree-verify-input-legality) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16> | |
%3 = tensor.empty(%0) : tensor<?x8640x3200xf16> | |
%4 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%1 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %4 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
%5 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After InjectTensorTracingPass (iree-flow-inject-tensor-tracing) //----- // | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16> | |
%3 = tensor.empty(%0) : tensor<?x8640x3200xf16> | |
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%1 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %4 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
%5 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
// -----// IR Dump After TensorPadToTensorInsertSlicePass (iree-flow-tensor-pad-to-tensor-insert-slice) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
  util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
    %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
    %2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
    %3 = tensor.empty(%0) : tensor<?x8640x3200xf16>
    %4 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%1 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %pack = tensor.pack %4 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
    %5 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
    util.return %5 : !hal.buffer_view
  }
}
// -----// IR Dump After InterchangeGenericOpsPass (iree-flow-interchange-generic-ops) //----- //
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
  %3 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%1 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %pack = tensor.pack %4 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
  %5 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump After ResolveShapedTypeResultDims (resolve-shaped-type-result-dims) //----- //
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
  %3 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%1 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %pack = tensor.pack %4 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
  %5 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
  %3 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%1 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %pack = tensor.pack %4 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
  %5 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
  %3 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%1 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %pack = tensor.pack %4 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
  %5 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump After ElementwiseOpFusionPass (iree-flow-elementwise-op-fusion) //----- //
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
  %3 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%1 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %pack = tensor.pack %4 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
  %5 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
  %3 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%1 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %pack = tensor.pack %4 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
  %5 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
  %3 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%1 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %pack = tensor.pack %4 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
  %5 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump After BubbleUpExpandShapesPass (iree-flow-bubble-up-expand-shapes) //----- //
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
  %3 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%1 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %pack = tensor.pack %4 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
  %5 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
  %3 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%1 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %pack = tensor.pack %4 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
  %5 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
  %3 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%1 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %pack = tensor.pack %4 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
  %5 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump After ElementwiseOpFusionPass (iree-flow-elementwise-op-fusion) //----- //
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
  %3 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%1 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %pack = tensor.pack %4 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
  %5 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
  %3 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%1 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %pack = tensor.pack %4 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
  %5 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
  %3 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%1 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %pack = tensor.pack %4 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
  %5 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump After FusionOfTensorOpsPass (iree-flow-fusion-of-tensor-ops) //----- //
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
  %3 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%1 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %pack = tensor.pack %4 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
  %5 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
  %3 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%1 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %pack = tensor.pack %4 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
  %5 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
  %3 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%1 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %pack = tensor.pack %4 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
  %5 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump After SplitReductionPass (iree-flow-split-reduction-ops) //----- //
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
  %3 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%1 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %pack = tensor.pack %4 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
  %5 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump After InterchangeGenericOpsPass (iree-flow-interchange-generic-ops) //----- //
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
  %3 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%1 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %pack = tensor.pack %4 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
  %5 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump After FormScalarDispatchesPass (iree-flow-form-scalar-dispatches) //----- //
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
  %3 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%1 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %pack = tensor.pack %4 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
  %5 = hal.tensor.export %pack "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump After FormDispatchRegionsPass (iree-flow-form-dispatch-regions) //----- //
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
  %3 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %c0 = arith.constant 0 : index
  %4 = flow.dispatch.region -> (tensor<?x540x3200x16x1xf16>{%0}) {
    %6 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%1 : tensor<8640x3200xf16>) outs(%3 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %pack = tensor.pack %6 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %2 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
    flow.return %pack : tensor<?x540x3200x16x1xf16>
  }
  %5 = hal.tensor.export %4 "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
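// FormDispatchRegionsPass makes the first structural change in this trace: the
// broadcast and the pack are grouped into one flow.dispatch.region whose result
// tensor<?x540x3200x16x1xf16>{%0} is sized by the dynamic dim %0 queried from %arg0.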
// -----// IR Dump After CloneProducersIntoDispatchRegionsPass (iree-flow-clone-producers-into-dispatch-regions) //----- //
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
  %3 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %c0 = arith.constant 0 : index
  %4 = flow.dispatch.region -> (tensor<?x540x3200x16x1xf16>{%0}) {
    %6 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
    %7 = tensor.empty(%0) : tensor<?x8640x3200xf16>
    %8 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%1 : tensor<8640x3200xf16>) outs(%7 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %pack = tensor.pack %8 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %6 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
    flow.return %pack : tensor<?x540x3200x16x1xf16>
  }
  %5 = hal.tensor.export %4 "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump After CollapseDimensionsPass (iree-flow-collapse-dimensions) //----- //
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %2 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
  %3 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %c0 = arith.constant 0 : index
  %4 = flow.dispatch.region -> (tensor<?x540x3200x16x1xf16>{%0}) {
    %6 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
    %7 = tensor.empty(%0) : tensor<?x8640x3200xf16>
    %8 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%1 : tensor<8640x3200xf16>) outs(%7 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %pack = tensor.pack %8 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %6 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
    flow.return %pack : tensor<?x540x3200x16x1xf16>
  }
  %5 = hal.tensor.export %4 "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump After FormDispatchWorkgroupsPass (iree-flow-form-dispatch-workgroups) //----- //
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %2 = flow.dispatch.workgroups[%0](%1, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x540x3200x16x1xf16>{%0} =
      (%arg2: !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>>, %arg3: index, %arg4: !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>) {
    %4 = flow.dispatch.workload.ordinal %arg3, 0 : index
    %5 = flow.dispatch.tensor.load %arg2, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16>
    %6 = tensor.empty(%4) : tensor<?x540x3200x16x1xf16>
    %7 = tensor.empty(%4) : tensor<?x8640x3200xf16>
    %8 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%5 : tensor<8640x3200xf16>) outs(%7 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %pack = tensor.pack %8 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %6 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
    flow.dispatch.tensor.store %pack, %arg4, offsets = [0, 0, 0, 0, 0], sizes = [%4, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%4}
    flow.return
  } count(%arg2: index) -> (index, index, index) {
    %x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg2
    flow.return %x, %y, %z : index, index, index
  }
  %3 = hal.tensor.export %2 "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
  util.return %3 : !hal.buffer_view
}
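// FormDispatchWorkgroupsPass rewrites the region as flow.dispatch.workgroups:
// %1 and the workload %0 become explicit operands, the body reads and writes
// through !flow.dispatch.tensor bindings, and the workgroup count is derived
// from the workload via flow.dispatch.workgroup_count_from_slice.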
// -----// IR Dump After CaptureDynamicDimsPass (iree-flow-capture-dynamic-dims) //----- //
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %2 = flow.dispatch.workgroups[%0](%1, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x540x3200x16x1xf16>{%0} =
      (%arg2: !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>>, %arg3: index, %arg4: !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>) {
    %4 = flow.dispatch.tie_shape %arg4 : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%arg3}
    %5 = flow.dispatch.workload.ordinal %arg3, 0 : index
    %6 = flow.dispatch.tensor.load %arg2, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16>
    %7 = tensor.empty(%5) : tensor<?x540x3200x16x1xf16>
    %8 = tensor.empty(%5) : tensor<?x8640x3200xf16>
    %9 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%6 : tensor<8640x3200xf16>) outs(%8 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %pack = tensor.pack %9 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
    flow.dispatch.tensor.store %pack, %4, offsets = [0, 0, 0, 0, 0], sizes = [%5, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%5}
    flow.return
  } count(%arg2: index) -> (index, index, index) {
    %x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg2
    flow.return %x, %y, %z : index, index, index
  }
  %3 = hal.tensor.export %2 "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
  util.return %3 : !hal.buffer_view
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %2 = flow.dispatch.workgroups[%0](%1, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x540x3200x16x1xf16>{%0} =
      (%arg2: !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>>, %arg3: index, %arg4: !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>) {
    %4 = flow.dispatch.workload.ordinal %arg3, 0 : index
    %5 = flow.dispatch.tie_shape %arg4 : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%4}
    %6 = flow.dispatch.tensor.load %arg2, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16>
    %7 = tensor.empty(%4) : tensor<?x540x3200x16x1xf16>
    %8 = tensor.empty(%4) : tensor<?x8640x3200xf16>
    %9 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%6 : tensor<8640x3200xf16>) outs(%8 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %pack = tensor.pack %9 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
    flow.dispatch.tensor.store %pack, %5, offsets = [0, 0, 0, 0, 0], sizes = [%4, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%4}
    flow.return
  } count(%arg2: index) -> (index, index, index) {
    %x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg2
    flow.return %x, %y, %z : index, index, index
  }
  %3 = hal.tensor.export %2 "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
  util.return %3 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %2 = flow.dispatch.workgroups[%0](%1, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x540x3200x16x1xf16>{%0} =
      (%arg2: !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>>, %arg3: index, %arg4: !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>) {
    %4 = flow.dispatch.workload.ordinal %arg3, 0 : index
    %5 = flow.dispatch.tie_shape %arg4 : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%4}
    %6 = flow.dispatch.tensor.load %arg2, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16>
    %7 = tensor.empty(%4) : tensor<?x540x3200x16x1xf16>
    %8 = tensor.empty(%4) : tensor<?x8640x3200xf16>
    %9 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%6 : tensor<8640x3200xf16>) outs(%8 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %pack = tensor.pack %9 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
    flow.dispatch.tensor.store %pack, %5, offsets = [0, 0, 0, 0, 0], sizes = [%4, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%4}
    flow.return
  } count(%arg2: index) -> (index, index, index) {
    %x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg2
    flow.return %x, %y, %z : index, index, index
  }
  %3 = hal.tensor.export %2 "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
  util.return %3 : !hal.buffer_view
}
// -----// IR Dump After InitializeEmptyTensorsPass (iree-flow-initialize-empty-tensors) //----- //
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %2 = flow.dispatch.workgroups[%0](%1, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x540x3200x16x1xf16>{%0} =
      (%arg2: !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>>, %arg3: index, %arg4: !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>) {
    %4 = flow.dispatch.workload.ordinal %arg3, 0 : index
    %5 = flow.dispatch.tie_shape %arg4 : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%4}
    %6 = flow.dispatch.tensor.load %arg2, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16>
    %7 = tensor.empty(%4) : tensor<?x540x3200x16x1xf16>
    %8 = tensor.empty(%4) : tensor<?x8640x3200xf16>
    %9 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%6 : tensor<8640x3200xf16>) outs(%8 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %pack = tensor.pack %9 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
    flow.dispatch.tensor.store %pack, %5, offsets = [0, 0, 0, 0, 0], sizes = [%4, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%4}
    flow.return
  } count(%arg2: index) -> (index, index, index) {
    %x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg2
    flow.return %x, %y, %z : index, index, index
  }
  %3 = hal.tensor.export %2 "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
  util.return %3 : !hal.buffer_view
}
// -----// IR Dump After OutlineDispatchExternsPass (iree-flow-outline-dispatch-externs) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
  util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
    %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
    %2 = flow.dispatch.workgroups[%0](%1, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x540x3200x16x1xf16>{%0} =
        (%arg2: !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>>, %arg3: index, %arg4: !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>) {
      %4 = flow.dispatch.workload.ordinal %arg3, 0 : index
      %5 = flow.dispatch.tie_shape %arg4 : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%4}
      %6 = flow.dispatch.tensor.load %arg2, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16>
      %7 = tensor.empty(%4) : tensor<?x540x3200x16x1xf16>
      %8 = tensor.empty(%4) : tensor<?x8640x3200xf16>
      %9 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%6 : tensor<8640x3200xf16>) outs(%8 : tensor<?x8640x3200xf16>) {
      ^bb0(%in: f16, %out: f16):
        linalg.yield %in : f16
      } -> tensor<?x8640x3200xf16>
      %pack = tensor.pack %9 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
      flow.dispatch.tensor.store %pack, %5, offsets = [0, 0, 0, 0, 0], sizes = [%4, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%4}
      flow.return
    } count(%arg2: index) -> (index, index, index) {
      %x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg2
      flow.return %x, %y, %z : index, index, index
    }
    %3 = hal.tensor.export %2 "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
    util.return %3 : !hal.buffer_view
  }
}
// -----// IR Dump After OutlineDispatchRegionsPass (iree-flow-outline-dispatch-regions) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
  flow.executable private @broadcast_pack_kernel_dispatch_0 {
    flow.executable.export public @broadcast_pack_kernel_dispatch_0 workgroups(%arg0: index) -> (index, index, index) {
      %x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
      flow.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @broadcast_pack_kernel_dispatch_0(%arg0: !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>>, %arg1: index, %arg2: !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>) {
        %0 = flow.dispatch.workload.ordinal %arg1, 0 : index
        %1 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
        %2 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16>
        %3 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
        %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
        %5 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%2 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
        ^bb0(%in: f16, %out: f16):
          linalg.yield %in : f16
        } -> tensor<?x8640x3200xf16>
        %pack = tensor.pack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %3 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
        flow.dispatch.tensor.store %pack, %1, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
        return
      }
    }
  }
  util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
    %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
    %2 = flow.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0[%0](%1, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x540x3200x16x1xf16>{%0}
    %3 = hal.tensor.export %2 "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
    util.return %3 : !hal.buffer_view
  }
}
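// OutlineDispatchRegionsPass hoists the workgroups body into the standalone
// flow.executable @broadcast_pack_kernel_dispatch_0; the caller shrinks to a
// single flow.dispatch of the exported entry point, passing %0 both as the
// workload and as an ordinary index operand.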
// -----// IR Dump After AnnotateDispatchesPass (iree-flow-annotate-dispatches) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
  flow.executable private @broadcast_pack_kernel_dispatch_0 {
    flow.executable.export public @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) {
      %x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
      flow.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack(%arg0: !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>>, %arg1: index, %arg2: !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>) {
        %0 = flow.dispatch.workload.ordinal %arg1, 0 : index
        %1 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
        %2 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16>
        %3 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
        %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
        %5 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%2 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
        ^bb0(%in: f16, %out: f16):
          linalg.yield %in : f16
        } -> tensor<?x8640x3200xf16>
        %pack = tensor.pack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %3 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
        flow.dispatch.tensor.store %pack, %1, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
        return
      }
    }
  }
  util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
    %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
    %2 = flow.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%1, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x540x3200x16x1xf16>{%0}
    %3 = hal.tensor.export %2 "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
    util.return %3 : !hal.buffer_view
  }
}
// -----// IR Dump After StripDebugOps (iree-util-strip-debug-ops) //----- //
flow.executable private @broadcast_pack_kernel_dispatch_0 {
  flow.executable.export public @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) {
    %x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
    flow.return %x, %y, %z : index, index, index
  }
  builtin.module {
    func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack(%arg0: !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>>, %arg1: index, %arg2: !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>) {
      %0 = flow.dispatch.workload.ordinal %arg1, 0 : index
      %1 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
      %2 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16>
      %3 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
      %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
      %5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%2 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
      ^bb0(%in: f16, %out: f16):
        linalg.yield %in : f16
      } -> tensor<?x8640x3200xf16>
      %pack = tensor.pack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %3 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
      flow.dispatch.tensor.store %pack, %1, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
      return
    }
  }
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %2 = flow.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%1, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x540x3200x16x1xf16>{%0}
  %3 = hal.tensor.export %2 "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
  util.return %3 : !hal.buffer_view
}
// -----// IR Dump After DeduplicateExecutablesPass (iree-flow-deduplicate-executables) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
  flow.executable private @broadcast_pack_kernel_dispatch_0 {
    flow.executable.export public @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) {
      %x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
      flow.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack(%arg0: !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>>, %arg1: index, %arg2: !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>) {
        %0 = flow.dispatch.workload.ordinal %arg1, 0 : index
        %1 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
        %2 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16>
        %3 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
        %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
        %5 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%2 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
        ^bb0(%in: f16, %out: f16):
          linalg.yield %in : f16
        } -> tensor<?x8640x3200xf16>
        %pack = tensor.pack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %3 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
        flow.dispatch.tensor.store %pack, %1, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
        return
      }
    }
  }
  util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
    %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
    %2 = flow.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%1, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x540x3200x16x1xf16>{%0}
    %3 = hal.tensor.export %2 "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
    util.return %3 : !hal.buffer_view
  }
}
// -----// IR Dump After InjectTensorTracingPass (iree-flow-inject-tensor-tracing) //----- // | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%2 = flow.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%1, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x540x3200x16x1xf16>{%0} | |
%3 = hal.tensor.export %2 "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
// -----// IR Dump After CleanupTensorShapesPass (iree-flow-cleanup-tensor-shapes) //----- // | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%2 = flow.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%1, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x540x3200x16x1xf16>{%0} | |
%3 = hal.tensor.export %2 "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
flow.executable private @broadcast_pack_kernel_dispatch_0 { | |
flow.executable.export public @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack(%arg0: !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>>, %arg1: index, %arg2: !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>) { | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0} | |
%2 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%3 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16> | |
%4 = tensor.empty(%0) : tensor<?x8640x3200xf16> | |
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%2 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %3 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %1, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0} | |
return | |
} | |
} | |
} | |
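
Note that executable-scope dumps like the one above print the indexing maps inline (affine_map<...>) while whole-module dumps use the #map/#map1 aliases; MLIR emits attribute aliases only when printing from the top-level module, so the IR is identical in both forms.
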
// -----// IR Dump After CSE (cse) //----- //
flow.executable private @broadcast_pack_kernel_dispatch_0 {
  flow.executable.export public @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) {
    %x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
    flow.return %x, %y, %z : index, index, index
  }
  builtin.module {
    func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack(%arg0: !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>>, %arg1: index, %arg2: !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>) {
      %0 = flow.dispatch.workload.ordinal %arg1, 0 : index
      %1 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
      %2 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16>
      %3 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
      %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
      %5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%2 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
      ^bb0(%in: f16, %out: f16):
        linalg.yield %in : f16
      } -> tensor<?x8640x3200xf16>
      %pack = tensor.pack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %3 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
      flow.dispatch.tensor.store %pack, %1, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
      return
    }
  }
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %2 = flow.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%1, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x540x3200x16x1xf16>{%0}
  %3 = hal.tensor.export %2 "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
  util.return %3 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %2 = flow.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%1, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x540x3200x16x1xf16>{%0}
  %3 = hal.tensor.export %2 "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
  util.return %3 : !hal.buffer_view
}
// -----// IR Dump After SymbolDCE (symbol-dce) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
  flow.executable private @broadcast_pack_kernel_dispatch_0 {
    flow.executable.export public @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) {
      %x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
      flow.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack(%arg0: !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>>, %arg1: index, %arg2: !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>) {
        %0 = flow.dispatch.workload.ordinal %arg1, 0 : index
        %1 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
        %2 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16>
        %3 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
        %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
        %5 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%2 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
        ^bb0(%in: f16, %out: f16):
          linalg.yield %in : f16
        } -> tensor<?x8640x3200xf16>
        %pack = tensor.pack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %3 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
        flow.dispatch.tensor.store %pack, %1, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
        return
      }
    }
  }
  util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
    %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
    %2 = flow.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%1, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x540x3200x16x1xf16>{%0}
    %3 = hal.tensor.export %2 "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
    util.return %3 : !hal.buffer_view
  }
}
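
A note on the dispatch ABI visible in this module: the dynamic dimension D (taken from hal.buffer_view.dim of %input0) is threaded through twice. As the workload in [%0] it feeds the export's workgroups region, which derives the grid size via flow.dispatch.workgroup_count_from_slice; as an ordinary index operand it is rebound inside the dispatch by flow.dispatch.workload.ordinal so that flow.dispatch.tie_shape can pin the dynamic result shape.
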
// -----// IR Dump After VerifyInputPass (iree-stream-verify-input) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
  flow.executable private @broadcast_pack_kernel_dispatch_0 {
    flow.executable.export public @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) {
      %x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
      flow.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack(%arg0: !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>>, %arg1: index, %arg2: !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>) {
        %0 = flow.dispatch.workload.ordinal %arg1, 0 : index
        %1 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
        %2 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16>
        %3 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
        %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
        %5 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%2 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
        ^bb0(%in: f16, %out: f16):
          linalg.yield %in : f16
        } -> tensor<?x8640x3200xf16>
        %pack = tensor.pack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %3 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
        flow.dispatch.tensor.store %pack, %1, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
        return
      }
    }
  }
  util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
    %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
    %2 = flow.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%1, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x540x3200x16x1xf16>{%0}
    %3 = hal.tensor.export %2 "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
    util.return %3 : !hal.buffer_view
  }
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %2 = flow.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%1, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x540x3200x16x1xf16>{%0}
  %3 = hal.tensor.export %2 "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
  util.return %3 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %2 = flow.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%1, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x540x3200x16x1xf16>{%0}
  %3 = hal.tensor.export %2 "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
  util.return %3 : !hal.buffer_view
}
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %2 = flow.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%1, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x540x3200x16x1xf16>{%0}
  %3 = hal.tensor.export %2 "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
  util.return %3 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
  flow.executable private @broadcast_pack_kernel_dispatch_0 {
    flow.executable.export public @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) {
      %x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
      flow.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack(%arg0: !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>>, %arg1: index, %arg2: !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>) {
        %0 = flow.dispatch.workload.ordinal %arg1, 0 : index
        %1 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
        %2 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16>
        %3 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
        %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
        %5 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%2 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
        ^bb0(%in: f16, %out: f16):
          linalg.yield %in : f16
        } -> tensor<?x8640x3200xf16>
        %pack = tensor.pack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %3 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
        flow.dispatch.tensor.store %pack, %1, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
        return
      }
    }
  }
  util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
    %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
    %2 = flow.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%1, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x540x3200x16x1xf16>{%0}
    %3 = hal.tensor.export %2 "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
    util.return %3 : !hal.buffer_view
  }
}
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
  flow.executable private @broadcast_pack_kernel_dispatch_0 {
    flow.executable.export public @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) {
      %x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
      flow.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack(%arg0: !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>>, %arg1: index, %arg2: !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>) {
        %0 = flow.dispatch.workload.ordinal %arg1, 0 : index
        %1 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
        %2 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16>
        %3 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
        %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
        %5 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%2 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
        ^bb0(%in: f16, %out: f16):
          linalg.yield %in : f16
        } -> tensor<?x8640x3200xf16>
        %pack = tensor.pack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %3 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
        flow.dispatch.tensor.store %pack, %1, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
        return
      }
    }
  }
  util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
    %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
    %2 = flow.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%1, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x540x3200x16x1xf16>{%0}
    %3 = hal.tensor.export %2 "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
    util.return %3 : !hal.buffer_view
  }
}
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
  flow.executable private @broadcast_pack_kernel_dispatch_0 {
    flow.executable.export public @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) {
      %x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
      flow.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack(%arg0: !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>>, %arg1: index, %arg2: !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>) {
        %0 = flow.dispatch.workload.ordinal %arg1, 0 : index
        %1 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
        %2 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16>
        %3 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
        %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
        %5 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%2 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
        ^bb0(%in: f16, %out: f16):
          linalg.yield %in : f16
        } -> tensor<?x8640x3200xf16>
        %pack = tensor.pack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %3 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
        flow.dispatch.tensor.store %pack, %1, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
        return
      }
    }
  }
  util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
    %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
    %2 = flow.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%1, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x540x3200x16x1xf16>{%0}
    %3 = hal.tensor.export %2 "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
    util.return %3 : !hal.buffer_view
  }
}
// -----// IR Dump After IPO (iree-util-ipo) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
  flow.executable private @broadcast_pack_kernel_dispatch_0 {
    flow.executable.export public @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) {
      %x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
      flow.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack(%arg0: !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>>, %arg1: index, %arg2: !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>) {
        %0 = flow.dispatch.workload.ordinal %arg1, 0 : index
        %1 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
        %2 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16>
        %3 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
        %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
        %5 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%2 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
        ^bb0(%in: f16, %out: f16):
          linalg.yield %in : f16
        } -> tensor<?x8640x3200xf16>
        %pack = tensor.pack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %3 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
        flow.dispatch.tensor.store %pack, %1, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
        return
      }
    }
  }
  util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
    %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
    %2 = flow.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%1, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x540x3200x16x1xf16>{%0}
    %3 = hal.tensor.export %2 "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
    util.return %3 : !hal.buffer_view
  }
}
// -----// IR Dump After OutlineConstants (iree-util-outline-constants) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
  flow.executable private @broadcast_pack_kernel_dispatch_0 {
    flow.executable.export public @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) {
      %x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
      flow.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack(%arg0: !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>>, %arg1: index, %arg2: !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>) {
        %0 = flow.dispatch.workload.ordinal %arg1, 0 : index
        %1 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
        %2 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16>
        %3 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
        %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
        %5 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%2 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
        ^bb0(%in: f16, %out: f16):
          linalg.yield %in : f16
        } -> tensor<?x8640x3200xf16>
        %pack = tensor.pack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %3 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
        flow.dispatch.tensor.store %pack, %1, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
        return
      }
    }
  }
  util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
    %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
    %2 = flow.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%1, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x540x3200x16x1xf16>{%0}
    %3 = hal.tensor.export %2 "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view
    util.return %3 : !hal.buffer_view
  }
}
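
Every dump from VerifyInputPass through OutlineConstants above is identical: this module has no util.global ops, no module-level constants to outline, and a single call-free entry point, so the global-folding, fusion, IPO, and constant-outlining passes find nothing to rewrite.
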
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%2 = flow.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%1, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x540x3200x16x1xf16>{%0} | |
%3 = hal.tensor.export %2 "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%2 = flow.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%1, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x540x3200x16x1xf16>{%0} | |
%3 = hal.tensor.export %2 "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- // | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%2 = flow.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%1, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x540x3200x16x1xf16>{%0} | |
%3 = hal.tensor.export %2 "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
flow.executable private @broadcast_pack_kernel_dispatch_0 { | |
flow.executable.export public @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack(%arg0: !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>>, %arg1: index, %arg2: !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>) { | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0} | |
%2 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%3 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16> | |
%4 = tensor.empty(%0) : tensor<?x8640x3200xf16> | |
%5 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%2 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %3 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %1, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0} | |
return | |
} | |
} | |
} | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%2 = flow.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%1, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x540x3200x16x1xf16>{%0} | |
%3 = hal.tensor.export %2 "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
flow.executable private @broadcast_pack_kernel_dispatch_0 { | |
flow.executable.export public @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack(%arg0: !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>>, %arg1: index, %arg2: !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>) { | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0} | |
%2 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%3 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16> | |
%4 = tensor.empty(%0) : tensor<?x8640x3200xf16> | |
%5 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%2 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %3 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %1, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0} | |
return | |
} | |
} | |
} | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%2 = flow.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%1, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x540x3200x16x1xf16>{%0} | |
%3 = hal.tensor.export %2 "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
flow.executable private @broadcast_pack_kernel_dispatch_0 { | |
flow.executable.export public @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack(%arg0: !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>>, %arg1: index, %arg2: !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>) { | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0} | |
%2 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%3 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16> | |
%4 = tensor.empty(%0) : tensor<?x8640x3200xf16> | |
%5 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%2 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %3 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %1, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0} | |
return | |
} | |
} | |
} | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%2 = flow.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%1, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x540x3200x16x1xf16>{%0} | |
%3 = hal.tensor.export %2 "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
} | |
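// Note: this is the flow-level form of the program. The broadcast (linalg.generic copying
// tensor<8640x3200xf16> across a new dynamic leading dimension) and the tensor.pack (tiling
// dims [1, 2] by inner_tiles [16, 1]; 540 * 16 = 8640) have been outlined into a single
// dispatch. The dynamic size %0 travels twice: as the workload in [...] consumed by the
// workgroup-count region, and as an ordinary index operand that the callee rebinds through
// flow.dispatch.workload.ordinal + flow.dispatch.tie_shape to recover the result shape.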
// -----// IR Dump After IPO (iree-util-ipo) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
flow.executable private @broadcast_pack_kernel_dispatch_0 { | |
flow.executable.export public @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack(%arg0: !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>>, %arg1: index, %arg2: !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>) { | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0} | |
%2 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%3 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16> | |
%4 = tensor.empty(%0) : tensor<?x8640x3200xf16> | |
%5 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%2 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %3 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %1, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0} | |
return | |
} | |
} | |
} | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%2 = flow.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%1, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x540x3200x16x1xf16>{%0} | |
%3 = hal.tensor.export %2 "output0" : tensor<?x540x3200x16x1xf16>{%0} -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
} | |
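// Note: IPO is a no-op here; the module has a single public util.func and no internal
// callees to propagate across, so this dump matches the previous one verbatim.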
// -----// IR Dump After ConvertToStreamPass (iree-stream-conversion) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
stream.executable private @broadcast_pack_kernel_dispatch_0 { | |
stream.executable.export public @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x540x3200x16x1xf16> | |
%5 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%6 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%5 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %6 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%1, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
return | |
} | |
} | |
} | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.sizeof tensor<8640x3200xf16> : index | |
%2 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%1} | |
%3 = stream.async.transfer %2 : !stream.resource<external>{%1} -> !stream.resource<*>{%1} | |
%c0 = arith.constant 0 : index | |
%4 = stream.tensor.sizeof tensor<?x540x3200x16x1xf16>{%0} : index | |
%5 = stream.async.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%3[%c0 to %1 for %1], %0) : (!stream.resource<*>{%1}, index) -> !stream.resource<*>{%4} | |
%6 = stream.async.transfer %5 : !stream.resource<*>{%4} -> !stream.resource<external>{%4} | |
%7 = stream.tensor.export %6 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%4} -> !hal.buffer_view | |
util.return %7 : !hal.buffer_view | |
} | |
} | |
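// Note: ConvertToStreamPass moves the program onto the stream dialect. The flow.executable
// becomes a stream.executable whose entry point takes !stream.binding arguments resolved
// through stream.binding.subspan, and the host function now asserts the input1 shape and
// element type, imports the buffer view as !stream.resource<external>, transfers it into
// the unknown-lifetime !stream.resource<*>, sizes allocations with stream.tensor.sizeof,
// and launches stream.async.dispatch. The operand range %3[%c0 to %1 for %1] reads as:
// bytes of resource %3 from offset %c0 to end %1, for length %1.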
// -----// IR Dump After VerifyLoweringToTensorsPass (iree-stream-verify-lowering-to-tensors) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
stream.executable private @broadcast_pack_kernel_dispatch_0 { | |
stream.executable.export public @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x540x3200x16x1xf16> | |
%5 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%6 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%5 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %6 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%1, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
return | |
} | |
} | |
} | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.sizeof tensor<8640x3200xf16> : index | |
%2 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%1} | |
%3 = stream.async.transfer %2 : !stream.resource<external>{%1} -> !stream.resource<*>{%1} | |
%c0 = arith.constant 0 : index | |
%4 = stream.tensor.sizeof tensor<?x540x3200x16x1xf16>{%0} : index | |
%5 = stream.async.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%3[%c0 to %1 for %1], %0) : (!stream.resource<*>{%1}, index) -> !stream.resource<*>{%4} | |
%6 = stream.async.transfer %5 : !stream.resource<*>{%4} -> !stream.resource<external>{%4} | |
%7 = stream.tensor.export %6 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%4} -> !hal.buffer_view | |
util.return %7 : !hal.buffer_view | |
} | |
} | |
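// Note: iree-stream-verify-lowering-to-tensors is verification-only; the IR above is
// unchanged from the conversion output.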
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%c0 = arith.constant 0 : index | |
%c3200 = arith.constant 3200 : index | |
%c8640 = arith.constant 8640 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.sizeof tensor<8640x3200xf16> : index | |
%2 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%1} | |
%3 = stream.async.transfer %2 : !stream.resource<external>{%1} -> !stream.resource<*>{%1} | |
%4 = stream.tensor.sizeof tensor<?x540x3200x16x1xf16>{%0} : index | |
%5 = stream.async.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%3[%c0 to %1 for %1], %0) : (!stream.resource<*>{%1}, index) -> !stream.resource<*>{%4} | |
%6 = stream.async.transfer %5 : !stream.resource<*>{%4} -> !stream.resource<external>{%4} | |
%7 = stream.tensor.export %6 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%4} -> !hal.buffer_view | |
util.return %7 : !hal.buffer_view | |
} | |
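// Note: canonicalization hoists the arith.constant ops to the top of the entry block
// (including the %c0 that previously sat mid-function). As a per-function pass it dumps
// only the util.func rather than the whole module.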
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%c0 = arith.constant 0 : index | |
%c3200 = arith.constant 3200 : index | |
%c8640 = arith.constant 8640 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.sizeof tensor<8640x3200xf16> : index | |
%2 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%1} | |
%3 = stream.async.transfer %2 : !stream.resource<external>{%1} -> !stream.resource<*>{%1} | |
%4 = stream.tensor.sizeof tensor<?x540x3200x16x1xf16>{%0} : index | |
%5 = stream.async.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%3[%c0 to %1 for %1], %0) : (!stream.resource<*>{%1}, index) -> !stream.resource<*>{%4} | |
%6 = stream.async.transfer %5 : !stream.resource<*>{%4} -> !stream.resource<external>{%4} | |
%7 = stream.tensor.export %6 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%4} -> !hal.buffer_view | |
util.return %7 : !hal.buffer_view | |
} | |
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- // | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%c0 = arith.constant 0 : index | |
%c3200 = arith.constant 3200 : index | |
%c8640 = arith.constant 8640 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.sizeof tensor<8640x3200xf16> : index | |
%2 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%1} | |
%3 = stream.async.transfer %2 : !stream.resource<external>{%1} -> !stream.resource<*>{%1} | |
%4 = stream.tensor.sizeof tensor<?x540x3200x16x1xf16>{%0} : index | |
%5 = stream.async.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%3[%c0 to %1 for %1], %0) : (!stream.resource<*>{%1}, index) -> !stream.resource<*>{%4} | |
%6 = stream.async.transfer %5 : !stream.resource<*>{%4} -> !stream.resource<external>{%4} | |
%7 = stream.tensor.export %6 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%4} -> !hal.buffer_view | |
util.return %7 : !hal.buffer_view | |
} | |
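// Note: the CSE and SimplifyGlobalAccesses dumps above are identical to the canonicalized
// function; there are no redundant subexpressions left and no global accesses in this
// program to simplify.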
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
stream.executable private @broadcast_pack_kernel_dispatch_0 { | |
stream.executable.export public @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x540x3200x16x1xf16> | |
%5 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%6 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%5 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %6 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%1, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
return | |
} | |
} | |
} | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%c0 = arith.constant 0 : index | |
%c3200 = arith.constant 3200 : index | |
%c8640 = arith.constant 8640 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.sizeof tensor<8640x3200xf16> : index | |
%2 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%1} | |
%3 = stream.async.transfer %2 : !stream.resource<external>{%1} -> !stream.resource<*>{%1} | |
%4 = stream.tensor.sizeof tensor<?x540x3200x16x1xf16>{%0} : index | |
%5 = stream.async.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%3[%c0 to %1 for %1], %0) : (!stream.resource<*>{%1}, index) -> !stream.resource<*>{%4} | |
%6 = stream.async.transfer %5 : !stream.resource<*>{%4} -> !stream.resource<external>{%4} | |
%7 = stream.tensor.export %6 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%4} -> !hal.buffer_view | |
util.return %7 : !hal.buffer_view | |
} | |
} | |
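// Note: iree-util-apply-patterns runs module-wide (hence the executable reappears in the
// dump) but finds nothing further to rewrite.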
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
stream.executable private @broadcast_pack_kernel_dispatch_0 { | |
stream.executable.export public @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x540x3200x16x1xf16> | |
%5 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%6 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%5 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %6 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%1, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
return | |
} | |
} | |
} | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%c0 = arith.constant 0 : index | |
%c3200 = arith.constant 3200 : index | |
%c8640 = arith.constant 8640 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.sizeof tensor<8640x3200xf16> : index | |
%2 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%1} | |
%3 = stream.async.transfer %2 : !stream.resource<external>{%1} -> !stream.resource<*>{%1} | |
%4 = stream.tensor.sizeof tensor<?x540x3200x16x1xf16>{%0} : index | |
%5 = stream.async.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%3[%c0 to %1 for %1], %0) : (!stream.resource<*>{%1}, index) -> !stream.resource<*>{%4} | |
%6 = stream.async.transfer %5 : !stream.resource<*>{%4} -> !stream.resource<external>{%4} | |
%7 = stream.tensor.export %6 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%4} -> !hal.buffer_view | |
util.return %7 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
stream.executable private @broadcast_pack_kernel_dispatch_0 { | |
stream.executable.export public @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x540x3200x16x1xf16> | |
%5 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%6 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%5 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %6 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%1, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
return | |
} | |
} | |
} | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%c0 = arith.constant 0 : index | |
%c3200 = arith.constant 3200 : index | |
%c8640 = arith.constant 8640 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.sizeof tensor<8640x3200xf16> : index | |
%2 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%1} | |
%3 = stream.async.transfer %2 : !stream.resource<external>{%1} -> !stream.resource<*>{%1} | |
%4 = stream.tensor.sizeof tensor<?x540x3200x16x1xf16>{%0} : index | |
%5 = stream.async.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%3[%c0 to %1 for %1], %0) : (!stream.resource<*>{%1}, index) -> !stream.resource<*>{%4} | |
%6 = stream.async.transfer %5 : !stream.resource<*>{%4} -> !stream.resource<external>{%4} | |
%7 = stream.tensor.export %6 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%4} -> !hal.buffer_view | |
util.return %7 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After IPO (iree-util-ipo) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
stream.executable private @broadcast_pack_kernel_dispatch_0 { | |
stream.executable.export public @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x540x3200x16x1xf16> | |
%5 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%6 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%5 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %6 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%1, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
return | |
} | |
} | |
} | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%c0 = arith.constant 0 : index | |
%c3200 = arith.constant 3200 : index | |
%c8640 = arith.constant 8640 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.sizeof tensor<8640x3200xf16> : index | |
%2 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%1} | |
%3 = stream.async.transfer %2 : !stream.resource<external>{%1} -> !stream.resource<*>{%1} | |
%4 = stream.tensor.sizeof tensor<?x540x3200x16x1xf16>{%0} : index | |
%5 = stream.async.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%3[%c0 to %1 for %1], %0) : (!stream.resource<*>{%1}, index) -> !stream.resource<*>{%4} | |
%6 = stream.async.transfer %5 : !stream.resource<*>{%4} -> !stream.resource<external>{%4} | |
%7 = stream.tensor.export %6 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%4} -> !hal.buffer_view | |
util.return %7 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After CombineInitializers (iree-util-combine-initializers) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
stream.executable private @broadcast_pack_kernel_dispatch_0 { | |
stream.executable.export public @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x540x3200x16x1xf16> | |
%5 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%6 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%5 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %6 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%1, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
return | |
} | |
} | |
} | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%c0 = arith.constant 0 : index | |
%c3200 = arith.constant 3200 : index | |
%c8640 = arith.constant 8640 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.sizeof tensor<8640x3200xf16> : index | |
%2 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%1} | |
%3 = stream.async.transfer %2 : !stream.resource<external>{%1} -> !stream.resource<*>{%1} | |
%4 = stream.tensor.sizeof tensor<?x540x3200x16x1xf16>{%0} : index | |
%5 = stream.async.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%3[%c0 to %1 for %1], %0) : (!stream.resource<*>{%1}, index) -> !stream.resource<*>{%4} | |
%6 = stream.async.transfer %5 : !stream.resource<*>{%4} -> !stream.resource<external>{%4} | |
%7 = stream.tensor.export %6 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%4} -> !hal.buffer_view | |
util.return %7 : !hal.buffer_view | |
} | |
} | |
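// Note: FoldGlobals, FuseGlobals, IPO, and CombineInitializers (the four dumps above) are
// all no-ops for this module; it defines no util.global ops and no initializers, so each
// dump is identical to the last.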
// -----// IR Dump After EncodeDeviceTensorsPass (iree-stream-encode-device-tensors) //----- // | |
stream.executable private @broadcast_pack_kernel_dispatch_0 { | |
stream.executable.export public @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x540x3200x16x1xf16> | |
%5 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%6 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%5 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %6 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%1, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
return | |
} | |
} | |
} | |
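// Note: EncodeDeviceTensorsPass makes no functional change to this executable; nothing
// here (e.g. sub-byte element types) appears to need re-encoding. The affine maps merely
// print inline because the dump covers the stream.executable in isolation, without the
// module-level #map aliases.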
// -----// IR Dump After EncodeHostTensorsPass (iree-stream-encode-host-tensors) //----- // | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%c55296000 = arith.constant 55296000 : index | |
%c0 = arith.constant 0 : index | |
%c3200 = arith.constant 3200 : index | |
%c8640 = arith.constant 8640 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%2 = stream.async.transfer %1 : !stream.resource<external>{%c55296000} -> !stream.resource<*>{%c55296000} | |
%3 = arith.muli %0, %c55296000 : index | |
%4 = stream.async.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%2[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<*>{%c55296000}, index) -> !stream.resource<*>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<*>{%3} -> !stream.resource<external>{%3} | |
%6 = stream.tensor.export %5 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%3} -> !hal.buffer_view | |
util.return %6 : !hal.buffer_view | |
} | |
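// Note: EncodeHostTensorsPass folds each stream.tensor.sizeof into byte arithmetic
// (dense row-major layout, f16 = 2 bytes):
//   sizeof tensor<8640x3200xf16>            = 8640 * 3200 * 2            = 55296000
//   sizeof tensor<?x540x3200x16x1xf16>{%0}  = %0 * (540 * 3200 * 16 * 2) = %0 * 55296000
// The per-slice footprints match because packing only retiles 8640 into 540 x 16, so the
// single constant %c55296000 serves as the input size while arith.muli scales it by the
// dynamic dimension %0 for the output.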
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%c55296000 = arith.constant 55296000 : index | |
%c0 = arith.constant 0 : index | |
%c3200 = arith.constant 3200 : index | |
%c8640 = arith.constant 8640 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%2 = stream.async.transfer %1 : !stream.resource<external>{%c55296000} -> !stream.resource<*>{%c55296000} | |
%3 = arith.muli %0, %c55296000 : index | |
%4 = stream.async.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%2[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<*>{%c55296000}, index) -> !stream.resource<*>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<*>{%3} -> !stream.resource<external>{%3} | |
%6 = stream.tensor.export %5 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%3} -> !hal.buffer_view | |
util.return %6 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%c55296000 = arith.constant 55296000 : index | |
%c0 = arith.constant 0 : index | |
%c3200 = arith.constant 3200 : index | |
%c8640 = arith.constant 8640 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%2 = stream.async.transfer %1 : !stream.resource<external>{%c55296000} -> !stream.resource<*>{%c55296000} | |
%3 = arith.muli %0, %c55296000 : index | |
%4 = stream.async.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%2[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<*>{%c55296000}, index) -> !stream.resource<*>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<*>{%3} -> !stream.resource<external>{%3} | |
%6 = stream.tensor.export %5 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%3} -> !hal.buffer_view | |
util.return %6 : !hal.buffer_view | |
} | |
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- // | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%c55296000 = arith.constant 55296000 : index | |
%c0 = arith.constant 0 : index | |
%c3200 = arith.constant 3200 : index | |
%c8640 = arith.constant 8640 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%2 = stream.async.transfer %1 : !stream.resource<external>{%c55296000} -> !stream.resource<*>{%c55296000} | |
%3 = arith.muli %0, %c55296000 : index | |
%4 = stream.async.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%2[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<*>{%c55296000}, index) -> !stream.resource<*>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<*>{%3} -> !stream.resource<external>{%3} | |
%6 = stream.tensor.export %5 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%3} -> !hal.buffer_view | |
util.return %6 : !hal.buffer_view | |
} | |
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
stream.executable private @broadcast_pack_kernel_dispatch_0 { | |
stream.executable.export public @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x540x3200x16x1xf16> | |
%5 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%6 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%5 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %6 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%1, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
return | |
} | |
} | |
} | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%c55296000 = arith.constant 55296000 : index | |
%c0 = arith.constant 0 : index | |
%c3200 = arith.constant 3200 : index | |
%c8640 = arith.constant 8640 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%2 = stream.async.transfer %1 : !stream.resource<external>{%c55296000} -> !stream.resource<*>{%c55296000} | |
%3 = arith.muli %0, %c55296000 : index | |
%4 = stream.async.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%2[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<*>{%c55296000}, index) -> !stream.resource<*>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<*>{%3} -> !stream.resource<external>{%3} | |
%6 = stream.tensor.export %5 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%3} -> !hal.buffer_view | |
util.return %6 : !hal.buffer_view | |
} | |
} | |
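The constant %c55296000 threaded through the stream ops above is the byte size of the 8640x3200 f16 weight, and %3 = %0 * 55296000 sizes the dispatch result, since one batch slice of the packed 540x3200x16x1 f16 output holds exactly as many bytes (540 * 16 = 8640). A quick sanity check of that arithmetic (Python, illustrative only):

# Quick check of the sizes appearing in the stream ops above.
# f16 elements are 2 bytes.
elem_bytes = 2

weight_bytes = 8640 * 3200 * elem_bytes            # %c55296000
per_batch_out = 540 * 3200 * 16 * 1 * elem_bytes   # one slice of ?x540x3200x16x1

assert weight_bytes == 55_296_000
assert per_batch_out == weight_bytes  # 540 * 16 == 8640

def result_bytes(d0: int) -> int:
    """Size computed by `arith.muli %0, %c55296000`."""
    return d0 * weight_bytes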
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
stream.executable private @broadcast_pack_kernel_dispatch_0 { | |
stream.executable.export public @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x540x3200x16x1xf16> | |
%5 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%6 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%5 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %6 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%1, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
return | |
} | |
} | |
} | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%c55296000 = arith.constant 55296000 : index | |
%c0 = arith.constant 0 : index | |
%c3200 = arith.constant 3200 : index | |
%c8640 = arith.constant 8640 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%2 = stream.async.transfer %1 : !stream.resource<external>{%c55296000} -> !stream.resource<*>{%c55296000} | |
%3 = arith.muli %0, %c55296000 : index | |
%4 = stream.async.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%2[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<*>{%c55296000}, index) -> !stream.resource<*>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<*>{%3} -> !stream.resource<external>{%3} | |
%6 = stream.tensor.export %5 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%3} -> !hal.buffer_view | |
util.return %6 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
stream.executable private @broadcast_pack_kernel_dispatch_0 { | |
stream.executable.export public @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x540x3200x16x1xf16> | |
%5 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%6 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%5 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %6 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%1, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
return | |
} | |
} | |
} | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%c55296000 = arith.constant 55296000 : index | |
%c0 = arith.constant 0 : index | |
%c3200 = arith.constant 3200 : index | |
%c8640 = arith.constant 8640 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%2 = stream.async.transfer %1 : !stream.resource<external>{%c55296000} -> !stream.resource<*>{%c55296000} | |
%3 = arith.muli %0, %c55296000 : index | |
%4 = stream.async.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%2[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<*>{%c55296000}, index) -> !stream.resource<*>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<*>{%3} -> !stream.resource<external>{%3} | |
%6 = stream.tensor.export %5 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%3} -> !hal.buffer_view | |
util.return %6 : !hal.buffer_view | |
} | |
} | |
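FoldGlobals, FuseGlobals, and IPO leave this module byte-for-byte unchanged; there are no util.global ops or internal calls for them to touch. The linalg.generic inside the dispatch is a pure broadcast: #map reads the weight at (d1, d2) while #map1 writes at (d0, d1, d2), replicating the 8640x3200 weight along the dynamic leading dimension. A NumPy reference for that op (a minimal sketch under those shapes, not IREE code):

import numpy as np

# Reference for the linalg.generic above: the input is indexed by (d1, d2),
# the output by (d0, d1, d2), i.e. a broadcast of the weight along d0.
def broadcast_ref(w: np.ndarray, d0: int) -> np.ndarray:
    assert w.shape == (8640, 3200) and w.dtype == np.float16
    return np.broadcast_to(w, (d0, 8640, 3200))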
// -----// IR Dump After IPO (iree-util-ipo) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
stream.executable private @broadcast_pack_kernel_dispatch_0 { | |
stream.executable.export public @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x540x3200x16x1xf16> | |
%5 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%6 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%5 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %6 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%1, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
return | |
} | |
} | |
} | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%c55296000 = arith.constant 55296000 : index | |
%c0 = arith.constant 0 : index | |
%c3200 = arith.constant 3200 : index | |
%c8640 = arith.constant 8640 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%2 = stream.async.transfer %1 : !stream.resource<external>{%c55296000} -> !stream.resource<*>{%c55296000} | |
%3 = arith.muli %0, %c55296000 : index | |
%4 = stream.async.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%2[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<*>{%c55296000}, index) -> !stream.resource<*>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<*>{%3} -> !stream.resource<external>{%3} | |
%6 = stream.tensor.export %5 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%3} -> !hal.buffer_view | |
util.return %6 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After VerifyLoweringToAsyncResourcesPass (iree-stream-verify-lowering-to-async-resources) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
stream.executable private @broadcast_pack_kernel_dispatch_0 { | |
stream.executable.export public @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x540x3200x16x1xf16> | |
%5 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%6 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%5 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %6 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%1, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
return | |
} | |
} | |
} | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%c55296000 = arith.constant 55296000 : index | |
%c0 = arith.constant 0 : index | |
%c3200 = arith.constant 3200 : index | |
%c8640 = arith.constant 8640 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%2 = stream.async.transfer %1 : !stream.resource<external>{%c55296000} -> !stream.resource<*>{%c55296000} | |
%3 = arith.muli %0, %c55296000 : index | |
%4 = stream.async.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%2[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<*>{%c55296000}, index) -> !stream.resource<*>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<*>{%3} -> !stream.resource<external>{%3} | |
%6 = stream.tensor.export %5 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%3} -> !hal.buffer_view | |
util.return %6 : !hal.buffer_view | |
} | |
} | |
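The tensor.pack in the dispatch splits d1 (8640) into 540 tiles of 16 and d2 (3200) into 3200 tiles of 1, keeps the outer order (outer_dims_perm = [0, 1, 2]), and appends the tile dims last, yielding ?x540x3200x16x1. The same relayout in NumPy terms (again a sketch under those shape assumptions):

import numpy as np

# Reference for the tensor.pack above: inner_dims_pos = [1, 2] with
# inner_tiles = [16, 1] splits d1 into 540x16 and d2 into 3200x1; the
# transpose moves the tile dims to the back, matching ?x540x3200x16x1.
def pack_ref(x: np.ndarray) -> np.ndarray:
    d0 = x.shape[0]
    assert x.shape[1:] == (8640, 3200)
    return x.reshape(d0, 540, 16, 3200, 1).transpose(0, 1, 3, 2, 4)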
// -----// IR Dump After MaterializeCopyOnWritePass (iree-stream-materialize-copy-on-write) //----- // | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%c55296000 = arith.constant 55296000 : index | |
%c0 = arith.constant 0 : index | |
%c3200 = arith.constant 3200 : index | |
%c8640 = arith.constant 8640 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%2 = stream.async.transfer %1 : !stream.resource<external>{%c55296000} -> !stream.resource<*>{%c55296000} | |
%3 = arith.muli %0, %c55296000 : index | |
%4 = stream.async.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%2[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<*>{%c55296000}, index) -> !stream.resource<*>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<*>{%3} -> !stream.resource<external>{%3} | |
%6 = stream.tensor.export %5 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%3} -> !hal.buffer_view | |
util.return %6 : !hal.buffer_view | |
} | |
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%c55296000 = arith.constant 55296000 : index | |
%c0 = arith.constant 0 : index | |
%c3200 = arith.constant 3200 : index | |
%c8640 = arith.constant 8640 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%2 = stream.async.transfer %1 : !stream.resource<external>{%c55296000} -> !stream.resource<*>{%c55296000} | |
%3 = arith.muli %0, %c55296000 : index | |
%4 = stream.async.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%2[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<*>{%c55296000}, index) -> !stream.resource<*>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<*>{%3} -> !stream.resource<external>{%3} | |
%6 = stream.tensor.export %5 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%3} -> !hal.buffer_view | |
util.return %6 : !hal.buffer_view | |
} | |
// -----// IR Dump After ElideAsyncCopiesPass (iree-stream-elide-async-copies) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
stream.executable private @broadcast_pack_kernel_dispatch_0 { | |
stream.executable.export public @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x540x3200x16x1xf16> | |
%5 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%6 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%5 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %6 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%1, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
return | |
} | |
} | |
} | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%c55296000 = arith.constant 55296000 : index | |
%c0 = arith.constant 0 : index | |
%c3200 = arith.constant 3200 : index | |
%c8640 = arith.constant 8640 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%2 = stream.async.transfer %1 : !stream.resource<external>{%c55296000} -> !stream.resource<*>{%c55296000} | |
%3 = arith.muli %0, %c55296000 : index | |
%4 = stream.async.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%2[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<*>{%c55296000}, index) -> !stream.resource<*>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<*>{%3} -> !stream.resource<external>{%3} | |
%6 = stream.tensor.export %5 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%3} -> !hal.buffer_view | |
util.return %6 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%c55296000 = arith.constant 55296000 : index | |
%c0 = arith.constant 0 : index | |
%c3200 = arith.constant 3200 : index | |
%c8640 = arith.constant 8640 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%2 = stream.async.transfer %1 : !stream.resource<external>{%c55296000} -> !stream.resource<*>{%c55296000} | |
%3 = arith.muli %0, %c55296000 : index | |
%4 = stream.async.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%2[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<*>{%c55296000}, index) -> !stream.resource<*>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<*>{%3} -> !stream.resource<external>{%3} | |
%6 = stream.tensor.export %5 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%3} -> !hal.buffer_view | |
util.return %6 : !hal.buffer_view | |
} | |
// -----// IR Dump After EmplaceAllocationsPass (iree-stream-emplace-allocations) //----- // | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%c55296000 = arith.constant 55296000 : index | |
%c0 = arith.constant 0 : index | |
%c3200 = arith.constant 3200 : index | |
%c8640 = arith.constant 8640 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%2 = stream.async.transfer %1 : !stream.resource<external>{%c55296000} -> !stream.resource<*>{%c55296000} | |
%3 = arith.muli %0, %c55296000 : index | |
%4 = stream.async.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%2[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<*>{%c55296000}, index) -> !stream.resource<*>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<*>{%3} -> !stream.resource<external>{%3} | |
%6 = stream.tensor.export %5 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%3} -> !hal.buffer_view | |
util.return %6 : !hal.buffer_view | |
} | |
// -----// IR Dump After RefineUsagePass (iree-stream-refine-usage) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
stream.executable private @broadcast_pack_kernel_dispatch_0 { | |
stream.executable.export public @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x540x3200x16x1xf16> | |
%5 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%6 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%5 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %6 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%1, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
return | |
} | |
} | |
} | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%c55296000 = arith.constant 55296000 : index | |
%c0 = arith.constant 0 : index | |
%c3200 = arith.constant 3200 : index | |
%c8640 = arith.constant 8640 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%2 = arith.muli %0, %c55296000 : index | |
%3 = stream.async.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%1[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<external>{%c55296000}, index) -> !stream.resource<external>{%2} | |
%4 = stream.tensor.export %3 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%2} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
} | |
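RefineUsagePass is the first pass in this stretch that changes the host function: both stream.async.transfer ops and the !stream.resource<*> placeholders from the earlier dumps are gone, and the dispatch now reads the imported external resource and writes the external result directly. Composing the broadcast and pack sketches above gives an end-to-end reference for what the dispatch computes:

import numpy as np

# End-to-end reference for the dispatch body: broadcast, then pack.
def dispatch_ref(w: np.ndarray, d0: int) -> np.ndarray:
    x = np.broadcast_to(w, (d0, 8640, 3200))
    return np.ascontiguousarray(
        x.reshape(d0, 540, 16, 3200, 1).transpose(0, 1, 3, 2, 4))

out = dispatch_ref(np.ones((8640, 3200), np.float16), d0=4)
assert out.shape == (4, 540, 3200, 16, 1)
assert out.nbytes == 4 * 55_296_000   # matches %3 = %0 * %c55296000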
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%c55296000 = arith.constant 55296000 : index | |
%c0 = arith.constant 0 : index | |
%c3200 = arith.constant 3200 : index | |
%c8640 = arith.constant 8640 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%2 = arith.muli %0, %c55296000 : index | |
%3 = stream.async.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%1[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<external>{%c55296000}, index) -> !stream.resource<external>{%2} | |
%4 = stream.tensor.export %3 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%2} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%c55296000 = arith.constant 55296000 : index | |
%c0 = arith.constant 0 : index | |
%c3200 = arith.constant 3200 : index | |
%c8640 = arith.constant 8640 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%2 = arith.muli %0, %c55296000 : index | |
%3 = stream.async.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%1[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<external>{%c55296000}, index) -> !stream.resource<external>{%2} | |
%4 = stream.tensor.export %3 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%2} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- // | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%c55296000 = arith.constant 55296000 : index | |
%c0 = arith.constant 0 : index | |
%c3200 = arith.constant 3200 : index | |
%c8640 = arith.constant 8640 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%2 = arith.muli %0, %c55296000 : index | |
%3 = stream.async.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%1[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<external>{%c55296000}, index) -> !stream.resource<external>{%2} | |
%4 = stream.tensor.export %3 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%2} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
stream.executable private @broadcast_pack_kernel_dispatch_0 { | |
stream.executable.export public @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x540x3200x16x1xf16> | |
%5 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%6 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%5 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %6 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%1, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
return | |
} | |
} | |
} | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%c55296000 = arith.constant 55296000 : index | |
%c0 = arith.constant 0 : index | |
%c3200 = arith.constant 3200 : index | |
%c8640 = arith.constant 8640 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%2 = arith.muli %0, %c55296000 : index | |
%3 = stream.async.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%1[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<external>{%c55296000}, index) -> !stream.resource<external>{%2} | |
%4 = stream.tensor.export %3 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%2} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
} | |
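On the host side, hal.buffer_view.assert rejects any input1 that is not a dense row-major 8640x3200 f16 buffer before stream.tensor.import runs. A rough host-side analog of that check (hypothetical helper, not IREE's actual API):

import numpy as np

# Analog of the hal.buffer_view.assert on %arg1: shape, element type, and
# row-major encoding are validated before the buffer enters the stream.
def check_input1(buf: np.ndarray) -> None:
    if buf.shape != (8640, 3200):
        raise ValueError("input1: expected shape 8640x3200")
    if buf.dtype != np.float16:
        raise ValueError("input1: expected f16 elements")
    if not buf.flags["C_CONTIGUOUS"]:
        raise ValueError("input1: expected dense row-major layout")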
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
stream.executable private @broadcast_pack_kernel_dispatch_0 { | |
stream.executable.export public @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x540x3200x16x1xf16> | |
%5 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%6 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%5 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %6 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%1, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
return | |
} | |
} | |
} | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%c55296000 = arith.constant 55296000 : index | |
%c0 = arith.constant 0 : index | |
%c3200 = arith.constant 3200 : index | |
%c8640 = arith.constant 8640 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%2 = arith.muli %0, %c55296000 : index | |
%3 = stream.async.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%1[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<external>{%c55296000}, index) -> !stream.resource<external>{%2} | |
%4 = stream.tensor.export %3 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%2} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
stream.executable private @broadcast_pack_kernel_dispatch_0 { | |
stream.executable.export public @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x540x3200x16x1xf16> | |
%5 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%6 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%5 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %6 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%1, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
return | |
} | |
} | |
} | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%c55296000 = arith.constant 55296000 : index | |
%c0 = arith.constant 0 : index | |
%c3200 = arith.constant 3200 : index | |
%c8640 = arith.constant 8640 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%2 = arith.muli %0, %c55296000 : index | |
%3 = stream.async.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%1[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<external>{%c55296000}, index) -> !stream.resource<external>{%2} | |
%4 = stream.tensor.export %3 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%2} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After IPO (iree-util-ipo) //----- // | |
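// Annotation: IPO is interprocedural; with a single public entry point and no
// internal callees there is nothing to propagate, so the dump below is
// identical to the previous one.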
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
stream.executable private @broadcast_pack_kernel_dispatch_0 { | |
stream.executable.export public @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x540x3200x16x1xf16> | |
%5 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%6 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%5 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %6 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%1, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
return | |
} | |
} | |
} | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%c55296000 = arith.constant 55296000 : index | |
%c0 = arith.constant 0 : index | |
%c3200 = arith.constant 3200 : index | |
%c8640 = arith.constant 8640 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%2 = arith.muli %0, %c55296000 : index | |
%3 = stream.async.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%1[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<external>{%c55296000}, index) -> !stream.resource<external>{%2} | |
%4 = stream.tensor.export %3 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%2} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After VerifyAsyncAccessRangesPass (iree-stream-verify-async-access-ranges) //----- // | |
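// Annotation: per the pass name, this is a verification-only gate that checks
// resource access ranges on async ops (e.g. %1[%c0 to %c55296000 for
// %c55296000]) against the declared resource sizes; it changes no IR.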
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
stream.executable private @broadcast_pack_kernel_dispatch_0 { | |
stream.executable.export public @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x540x3200x16x1xf16> | |
%5 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%6 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%5 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %6 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%1, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
return | |
} | |
} | |
} | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%c55296000 = arith.constant 55296000 : index | |
%c0 = arith.constant 0 : index | |
%c3200 = arith.constant 3200 : index | |
%c8640 = arith.constant 8640 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%2 = arith.muli %0, %c55296000 : index | |
%3 = stream.async.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%1[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<external>{%c55296000}, index) -> !stream.resource<external>{%2} | |
%4 = stream.tensor.export %3 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%2} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After ScheduleExecutionPass (iree-stream-schedule-execution) //----- // | |
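// Annotation: ScheduleExecutionPass partitions async work into
// stream.async.execute regions that produce timepoints. With a single dispatch
// there is exactly one region, awaited before the tensor export. Note this
// dump prints only the affected function, not the whole module.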
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%c55296000 = arith.constant 55296000 : index | |
%c0 = arith.constant 0 : index | |
%c3200 = arith.constant 3200 : index | |
%c8640 = arith.constant 8640 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%2 = arith.muli %0, %c55296000 : index | |
%results, %result_timepoint = stream.async.execute with(%1 as %arg2: !stream.resource<external>{%c55296000}) -> !stream.resource<external>{%2} { | |
%5 = stream.async.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%arg2[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<external>{%c55296000}, index) -> !stream.resource<external>{%2} | |
stream.yield %5 : !stream.resource<external>{%2} | |
} => !stream.timepoint | |
%3 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%2} | |
%4 = stream.tensor.export %3 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%2} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
// -----// IR Dump After ScheduleConcurrencyPass (iree-stream-schedule-concurrency) //----- // | |
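// Annotation: ScheduleConcurrencyPass would group independent dispatches
// within an execute region into stream.async.concurrent regions; with one
// dispatch there is no concurrency to exploit, so the function is unchanged.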
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%c55296000 = arith.constant 55296000 : index | |
%c0 = arith.constant 0 : index | |
%c3200 = arith.constant 3200 : index | |
%c8640 = arith.constant 8640 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%2 = arith.muli %0, %c55296000 : index | |
%results, %result_timepoint = stream.async.execute with(%1 as %arg2: !stream.resource<external>{%c55296000}) -> !stream.resource<external>{%2} { | |
%5 = stream.async.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%arg2[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<external>{%c55296000}, index) -> !stream.resource<external>{%2} | |
stream.yield %5 : !stream.resource<external>{%2} | |
} => !stream.timepoint | |
%3 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%2} | |
%4 = stream.tensor.export %3 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%2} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
// -----// IR Dump After PropagateTimepointsPass (iree-stream-propagate-timepoints) //----- // | |
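// Annotation: PropagateTimepointsPass threads explicit timepoints through the
// program. The execute region below now awaits a stream.timepoint.immediate,
// since its only operand is an imported buffer with no producer to wait on.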
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
stream.executable private @broadcast_pack_kernel_dispatch_0 { | |
stream.executable.export public @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x540x3200x16x1xf16> | |
%5 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%6 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%5 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %6 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%1, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
return | |
} | |
} | |
} | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%c55296000 = arith.constant 55296000 : index | |
%c0 = arith.constant 0 : index | |
%c3200 = arith.constant 3200 : index | |
%c8640 = arith.constant 8640 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%2 = arith.muli %0, %c55296000 : index | |
%3 = stream.timepoint.immediate => !stream.timepoint | |
%results, %result_timepoint = stream.async.execute await(%3) => with(%1 as %arg2: !stream.resource<external>{%c55296000}) -> !stream.resource<external>{%2} { | |
%6 = stream.async.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%arg2[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<external>{%c55296000}, index) -> !stream.resource<external>{%2} | |
stream.yield %6 : !stream.resource<external>{%2} | |
} => !stream.timepoint | |
%4 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%2} | |
%5 = stream.tensor.export %4 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%2} -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After MaterializeBuiltinsPass (iree-stream-materialize-builtins) //----- // | |
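// Annotation: per the pass name, MaterializeBuiltinsPass substitutes builtin
// executables for stream ops that need them (splats and the like); nothing in
// this program requires one, so the module is unchanged.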
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
stream.executable private @broadcast_pack_kernel_dispatch_0 { | |
stream.executable.export public @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x540x3200x16x1xf16> | |
%5 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%6 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%5 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %6 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%1, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
return | |
} | |
} | |
} | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%c55296000 = arith.constant 55296000 : index | |
%c0 = arith.constant 0 : index | |
%c3200 = arith.constant 3200 : index | |
%c8640 = arith.constant 8640 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%2 = arith.muli %0, %c55296000 : index | |
%3 = stream.timepoint.immediate => !stream.timepoint | |
%results, %result_timepoint = stream.async.execute await(%3) => with(%1 as %arg2: !stream.resource<external>{%c55296000}) -> !stream.resource<external>{%2} { | |
%6 = stream.async.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%arg2[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<external>{%c55296000}, index) -> !stream.resource<external>{%2} | |
stream.yield %6 : !stream.resource<external>{%2} | |
} => !stream.timepoint | |
%4 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%2} | |
%5 = stream.tensor.export %4 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%2} -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
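// Annotation: the canonicalizer folds the await(%3) clause away, since waiting
// on a stream.timepoint.immediate is a no-op; the execute region returns to
// the form it had before timepoint propagation.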
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%c55296000 = arith.constant 55296000 : index | |
%c0 = arith.constant 0 : index | |
%c3200 = arith.constant 3200 : index | |
%c8640 = arith.constant 8640 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%2 = arith.muli %0, %c55296000 : index | |
%results, %result_timepoint = stream.async.execute with(%1 as %arg2: !stream.resource<external>{%c55296000}) -> !stream.resource<external>{%2} { | |
%5 = stream.async.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%arg2[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<external>{%c55296000}, index) -> !stream.resource<external>{%2} | |
stream.yield %5 : !stream.resource<external>{%2} | |
} => !stream.timepoint | |
%3 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%2} | |
%4 = stream.tensor.export %3 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%2} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
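// Annotation: CSE finds no duplicate subexpressions to eliminate; the function
// is unchanged.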
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%c55296000 = arith.constant 55296000 : index | |
%c0 = arith.constant 0 : index | |
%c3200 = arith.constant 3200 : index | |
%c8640 = arith.constant 8640 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%2 = arith.muli %0, %c55296000 : index | |
%results, %result_timepoint = stream.async.execute with(%1 as %arg2: !stream.resource<external>{%c55296000}) -> !stream.resource<external>{%2} { | |
%5 = stream.async.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%arg2[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<external>{%c55296000}, index) -> !stream.resource<external>{%2} | |
stream.yield %5 : !stream.resource<external>{%2} | |
} => !stream.timepoint | |
%3 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%2} | |
%4 = stream.tensor.export %3 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%2} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- // | |
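// Annotation: there are no util.global accesses in this program, so this pass
// has nothing to simplify.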
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%c55296000 = arith.constant 55296000 : index | |
%c0 = arith.constant 0 : index | |
%c3200 = arith.constant 3200 : index | |
%c8640 = arith.constant 8640 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%2 = arith.muli %0, %c55296000 : index | |
%results, %result_timepoint = stream.async.execute with(%1 as %arg2: !stream.resource<external>{%c55296000}) -> !stream.resource<external>{%2} { | |
%5 = stream.async.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%arg2[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<external>{%c55296000}, index) -> !stream.resource<external>{%2} | |
stream.yield %5 : !stream.resource<external>{%2} | |
} => !stream.timepoint | |
%3 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%2} | |
%4 = stream.tensor.export %3 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%2} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- // | |
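// Annotation: this applies the util dialect's cleanup patterns module-wide;
// the printed module (which again includes the executable) is unchanged in
// content.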
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
stream.executable private @broadcast_pack_kernel_dispatch_0 { | |
stream.executable.export public @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x540x3200x16x1xf16> | |
%5 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%6 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%5 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %6 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%1, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
return | |
} | |
} | |
} | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%c55296000 = arith.constant 55296000 : index | |
%c0 = arith.constant 0 : index | |
%c3200 = arith.constant 3200 : index | |
%c8640 = arith.constant 8640 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%2 = arith.muli %0, %c55296000 : index | |
%results, %result_timepoint = stream.async.execute with(%1 as %arg2: !stream.resource<external>{%c55296000}) -> !stream.resource<external>{%2} { | |
%5 = stream.async.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%arg2[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<external>{%c55296000}, index) -> !stream.resource<external>{%2} | |
stream.yield %5 : !stream.resource<external>{%2} | |
} => !stream.timepoint | |
%3 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%2} | |
%4 = stream.tensor.export %3 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%2} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- // | |
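// Annotation: no util.global ops exist to fold; the module is unchanged.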
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
stream.executable private @broadcast_pack_kernel_dispatch_0 { | |
stream.executable.export public @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x540x3200x16x1xf16> | |
%5 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%6 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%5 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %6 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%1, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
return | |
} | |
} | |
} | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%c55296000 = arith.constant 55296000 : index | |
%c0 = arith.constant 0 : index | |
%c3200 = arith.constant 3200 : index | |
%c8640 = arith.constant 8640 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%2 = arith.muli %0, %c55296000 : index | |
%results, %result_timepoint = stream.async.execute with(%1 as %arg2: !stream.resource<external>{%c55296000}) -> !stream.resource<external>{%2} { | |
%5 = stream.async.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%arg2[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<external>{%c55296000}, index) -> !stream.resource<external>{%2} | |
stream.yield %5 : !stream.resource<external>{%2} | |
} => !stream.timepoint | |
%3 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%2} | |
%4 = stream.tensor.export %3 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%2} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- // | |
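// Annotation: likewise, no globals to fuse; the module is unchanged.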
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
stream.executable private @broadcast_pack_kernel_dispatch_0 { | |
stream.executable.export public @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x540x3200x16x1xf16> | |
%5 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%6 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%5 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %6 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%1, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
return | |
} | |
} | |
} | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%c55296000 = arith.constant 55296000 : index | |
%c0 = arith.constant 0 : index | |
%c3200 = arith.constant 3200 : index | |
%c8640 = arith.constant 8640 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%2 = arith.muli %0, %c55296000 : index | |
%results, %result_timepoint = stream.async.execute with(%1 as %arg2: !stream.resource<external>{%c55296000}) -> !stream.resource<external>{%2} { | |
%5 = stream.async.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%arg2[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<external>{%c55296000}, index) -> !stream.resource<external>{%2} | |
stream.yield %5 : !stream.resource<external>{%2} | |
} => !stream.timepoint | |
%3 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%2} | |
%4 = stream.tensor.export %3 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%2} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After IPO (iree-util-ipo) //----- // | |
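// Annotation: a second IPO run after the stream scheduling passes; still a
// single entry point, still no change.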
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
stream.executable private @broadcast_pack_kernel_dispatch_0 { | |
stream.executable.export public @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x540x3200x16x1xf16> | |
%5 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%6 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%5 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %6 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%1, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
return | |
} | |
} | |
} | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%c55296000 = arith.constant 55296000 : index | |
%c0 = arith.constant 0 : index | |
%c3200 = arith.constant 3200 : index | |
%c8640 = arith.constant 8640 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%2 = arith.muli %0, %c55296000 : index | |
%results, %result_timepoint = stream.async.execute with(%1 as %arg2: !stream.resource<external>{%c55296000}) -> !stream.resource<external>{%2} { | |
%5 = stream.async.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%arg2[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<external>{%c55296000}, index) -> !stream.resource<external>{%2} | |
stream.yield %5 : !stream.resource<external>{%2} | |
} => !stream.timepoint | |
%3 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%2} | |
%4 = stream.tensor.export %3 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%2} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After VerifyLoweringToAsyncPass (iree-stream-verify-lowering-to-async) //----- // | |
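// Annotation: another verification-only gate, checking that all stream ops
// have reached async form before allocation is scheduled; no IR changes.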
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
stream.executable private @broadcast_pack_kernel_dispatch_0 { | |
stream.executable.export public @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x540x3200x16x1xf16> | |
%5 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%6 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%5 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %6 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%1, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
return | |
} | |
} | |
} | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%c55296000 = arith.constant 55296000 : index | |
%c0 = arith.constant 0 : index | |
%c3200 = arith.constant 3200 : index | |
%c8640 = arith.constant 8640 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%2 = arith.muli %0, %c55296000 : index | |
%results, %result_timepoint = stream.async.execute with(%1 as %arg2: !stream.resource<external>{%c55296000}) -> !stream.resource<external>{%2} { | |
%5 = stream.async.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%arg2[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<external>{%c55296000}, index) -> !stream.resource<external>{%2} | |
stream.yield %5 : !stream.resource<external>{%2} | |
} => !stream.timepoint | |
%3 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%2} | |
%4 = stream.tensor.export %3 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%2} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After ScheduleAllocationPass (iree-stream-schedule-allocation) //----- // | |
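// Annotation: ScheduleAllocationPass makes the first structural change in a
// while: the stream.async.execute region becomes a stream.cmd.execute, the
// dynamically sized result is allocated up front via stream.resource.alloca
// (size %2 = %0 * 55296000), and the dispatch becomes a stream.cmd.dispatch
// with explicit ro/wo bindings carrying byte offsets and lengths instead of
// SSA resource values.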
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
stream.executable private @broadcast_pack_kernel_dispatch_0 { | |
stream.executable.export public @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x540x3200x16x1xf16> | |
%5 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%6 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%5 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %6 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%1, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
return | |
} | |
} | |
} | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%c55296000 = arith.constant 55296000 : index | |
%c0 = arith.constant 0 : index | |
%c3200 = arith.constant 3200 : index | |
%c8640 = arith.constant 8640 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%2 = arith.muli %0, %c55296000 : index | |
%c0_0 = arith.constant 0 : index | |
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%2} => !stream.timepoint | |
%3 = stream.cmd.execute await(%result_timepoint) => with(%1 as %arg2: !stream.resource<external>{%c55296000}, %result as %arg3: !stream.resource<external>{%2}) { | |
stream.cmd.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%0 : index) { | |
ro %arg2[%c0 for %c55296000] : !stream.resource<external>{%c55296000}, | |
wo %arg3[%c0_0 for %2] : !stream.resource<external>{%2} | |
} | |
} => !stream.timepoint | |
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%2} | |
%5 = stream.tensor.export %4 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%2} -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
} | |
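
For intuition, the dispatch function above is a broadcast of the 8640x3200 f16 weight across the dynamic leading dimension, followed by tensor.pack with outer_dims_perm = [0, 1, 2] (identity), inner_dims_pos = [1, 2], and inner_tiles = [16, 1]. Below is a small NumPy emulation of those semantics (an editor's reference model, not the code IREE generates):

import numpy as np

def broadcast_pack_reference(w: np.ndarray, d0: int) -> np.ndarray:
    """Emulate the dispatch: out[b, o1, o2, i1, i2] = w[o1 * 16 + i1, o2 + i2]."""
    assert w.shape == (8640, 3200) and w.dtype == np.float16
    x = np.broadcast_to(w, (d0, 8640, 3200))   # the linalg.generic broadcast
    # tensor.pack: split dim 1 into (540, 16) tiles and dim 2 into (3200, 1),
    # then move the tile indices to the innermost positions.
    return x.reshape(d0, 540, 16, 3200, 1).transpose(0, 1, 3, 2, 4)

w = np.random.default_rng(0).random((8640, 3200)).astype(np.float16)
out = broadcast_pack_reference(w, 2)
assert out.shape == (2, 540, 3200, 16, 1)
assert out[1, 33, 7, 5, 0] == w[33 * 16 + 5, 7]  # the pack index mapping
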
// -----// IR Dump After PackConstantsPass (iree-stream-pack-constants) //----- //
// (IR unchanged from the previous dump.)
// -----// IR Dump After LayoutSlicesPass (iree-stream-layout-slices) //----- //
// (IR unchanged.)
// -----// IR Dump After PropagateSubranges (iree-util-propagate-subranges) //----- //
// (IR unchanged.)
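
The hal.buffer_view.assert that each dump repeats is the ABI-level shape guard for %input1. A hedged Python analogue of what that check enforces (the BufferView class and its field names are illustrative stand-ins, not IREE's runtime API):

from dataclasses import dataclass

@dataclass
class BufferView:
    # Illustrative stand-in for !hal.buffer_view; not IREE's actual API.
    shape: tuple
    element_type: str
    encoding: str

def assert_input1(view: BufferView) -> None:
    # Mirrors: hal.buffer_view.assert ... message("input1")
    #          shape([%c8640, %c3200]) type(f16) encoding(dense_row_major)
    if (view.shape != (8640, 3200)
            or view.element_type != "f16"
            or view.encoding != "dense_row_major"):
        raise ValueError("input1: expected dense row-major tensor<8640x3200xf16>")

assert_input1(BufferView((8640, 3200), "f16", "dense_row_major"))  # passes
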
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
  %c55296000 = arith.constant 55296000 : index
  %c0 = arith.constant 0 : index
  %c3200 = arith.constant 3200 : index
  %c8640 = arith.constant 8640 : index
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %element_type_f16 = hal.element_type<f16> : i32
  %dense_row_major = hal.encoding_type<dense_row_major> : i32
  hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
  %1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000}
  %2 = arith.muli %0, %c55296000 : index
  %result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%2} => !stream.timepoint
  %3 = stream.cmd.execute await(%result_timepoint) => with(%1 as %arg2: !stream.resource<external>{%c55296000}, %result as %arg3: !stream.resource<external>{%2}) {
    stream.cmd.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%0 : index) {
      ro %arg2[%c0 for %c55296000] : !stream.resource<external>{%c55296000},
      wo %arg3[%c0 for %2] : !stream.resource<external>{%2}
    }
  } => !stream.timepoint
  %4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%2}
  %5 = stream.tensor.export %4 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%2} -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
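
After canonicalization (which folded the redundant zero constant %c0_0 into %c0) the schedule is in its final shape: allocate an uninitialized result of %0 * 55296000 bytes, run one dispatch that reads the imported weight and writes the result, then block on the timepoint before exporting, which is why the reflection metadata advertises a sync func. A schematic Python rendering of that host-side flow; dispatch_async is a made-up placeholder for the stream.cmd.execute region, not a real IREE call:

from concurrent.futures import Future

def dispatch_async(workload, reads, writes) -> Future:
    # Made-up stand-in for stream.cmd.execute: completes immediately
    # instead of running the real broadcast+pack dispatch.
    done: Future = Future()
    done.set_result(None)
    return done

def run_broadcast_pack(d0: int, weight_buf: bytes) -> bytearray:
    assert len(weight_buf) == 55_296_000      # size of the ro binding
    result = bytearray(d0 * 55_296_000)       # stream.resource.alloca (uninitialized)
    timepoint = dispatch_async(               # stream.cmd.execute { stream.cmd.dispatch ... }
        workload=[d0],
        reads=[(weight_buf, 0, 55_296_000)],  # ro %arg2[%c0 for %c55296000]
        writes=[(result, 0, d0 * 55_296_000)],  # wo %arg3[%c0 for %2]
    )
    timepoint.result()                        # stream.timepoint.await
    return result                             # stream.tensor.export

out = run_broadcast_pack(1, bytes(55_296_000))
assert len(out) == 55_296_000
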
// -----// IR Dump After CSE (cse) //----- //
// (IR unchanged.)
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
// (IR unchanged.)
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- //
// (IR unchanged.)
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- //
// (IR unchanged.)
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- //
// (IR unchanged.)
// -----// IR Dump After IPO (iree-util-ipo) //----- //
// (IR unchanged.)
// -----// IR Dump After VerifyLoweringToCmdPass (iree-stream-verify-lowering-to-cmd) //----- //
// (IR unchanged.)
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
// (IR unchanged.)
// -----// IR Dump After CSE (cse) //----- //
// (IR unchanged.)
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
// (IR unchanged.)
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
stream.executable private @broadcast_pack_kernel_dispatch_0 { | |
stream.executable.export public @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x540x3200x16x1xf16> | |
%5 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%6 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%5 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %6 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%1, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
return | |
} | |
} | |
} | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%c55296000 = arith.constant 55296000 : index | |
%c0 = arith.constant 0 : index | |
%c3200 = arith.constant 3200 : index | |
%c8640 = arith.constant 8640 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%2 = arith.muli %0, %c55296000 : index | |
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%2} => !stream.timepoint | |
%3 = stream.cmd.execute await(%result_timepoint) => with(%1 as %arg2: !stream.resource<external>{%c55296000}, %result as %arg3: !stream.resource<external>{%2}) { | |
stream.cmd.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%0 : index) { | |
ro %arg2[%c0 for %c55296000] : !stream.resource<external>{%c55296000}, | |
wo %arg3[%c0 for %2] : !stream.resource<external>{%2} | |
} | |
} => !stream.timepoint | |
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%2} | |
%5 = stream.tensor.export %4 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%2} -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
  stream.executable private @broadcast_pack_kernel_dispatch_0 {
    stream.executable.export public @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) {
      %x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
      stream.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) {
        %c0 = arith.constant 0 : index
        %0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>>
        %1 = flow.dispatch.workload.ordinal %arg1, 0 : index
        %2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1}
        %3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16>
        %4 = tensor.empty(%1) : tensor<?x540x3200x16x1xf16>
        %5 = tensor.empty(%1) : tensor<?x8640x3200xf16>
        %6 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%5 : tensor<?x8640x3200xf16>) {
        ^bb0(%in: f16, %out: f16):
          linalg.yield %in : f16
        } -> tensor<?x8640x3200xf16>
        %pack = tensor.pack %6 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
        flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%1, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1}
        return
      }
    }
  }
  util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
    %c55296000 = arith.constant 55296000 : index
    %c0 = arith.constant 0 : index
    %c3200 = arith.constant 3200 : index
    %c8640 = arith.constant 8640 : index
    %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
    %element_type_f16 = hal.element_type<f16> : i32
    %dense_row_major = hal.encoding_type<dense_row_major> : i32
    hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
    %1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000}
    %2 = arith.muli %0, %c55296000 : index
    %result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%2} => !stream.timepoint
    %3 = stream.cmd.execute await(%result_timepoint) => with(%1 as %arg2: !stream.resource<external>{%c55296000}, %result as %arg3: !stream.resource<external>{%2}) {
      stream.cmd.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%0 : index) {
        ro %arg2[%c0 for %c55296000] : !stream.resource<external>{%c55296000},
        wo %arg3[%c0 for %2] : !stream.resource<external>{%2}
      }
    } => !stream.timepoint
    %4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%2}
    %5 = stream.tensor.export %4 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%2} -> !hal.buffer_view
    util.return %5 : !hal.buffer_view
  }
}
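
The linalg.generic in these dumps is a pure broadcast: the input map #map = (d0, d1, d2) -> (d1, d2) reads the same 8640x3200 slice for every value of d0, while the identity map #map1 writes the ?x8640x3200 result. A NumPy equivalent for intuition (D again stands in for the dynamic dimension):

import numpy as np

D = 3  # stand-in for the dynamic ? dimension
src = np.ones((8640, 3200), dtype=np.float16)
out = np.broadcast_to(src, (D, 8640, 3200))  # zero-copy view: one slice per d0
assert out.shape == (D, 8640, 3200)
assert (out[0] == out[D - 1]).all()
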
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- //
// (module unchanged; identical to the FoldGlobals dump above)
// -----// IR Dump After IPO (iree-util-ipo) //----- //
// (module unchanged)
// -----// IR Dump After SCFToControlFlow (convert-scf-to-cf) //----- //
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
  %c55296000 = arith.constant 55296000 : index
  %c0 = arith.constant 0 : index
  %c3200 = arith.constant 3200 : index
  %c8640 = arith.constant 8640 : index
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %element_type_f16 = hal.element_type<f16> : i32
  %dense_row_major = hal.encoding_type<dense_row_major> : i32
  hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
  %1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000}
  %2 = arith.muli %0, %c55296000 : index
  %result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%2} => !stream.timepoint
  %3 = stream.cmd.execute await(%result_timepoint) => with(%1 as %arg2: !stream.resource<external>{%c55296000}, %result as %arg3: !stream.resource<external>{%2}) {
    stream.cmd.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%0 : index) {
      ro %arg2[%c0 for %c55296000] : !stream.resource<external>{%c55296000},
      wo %arg3[%c0 for %2] : !stream.resource<external>{%2}
    }
  } => !stream.timepoint
  %4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%2}
  %5 = stream.tensor.export %4 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%2} -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
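
A worked check of the host-side constants in this function (editorial, not part of the dump): %c55296000 is the byte size of the 8640x3200 f16 input, and because 540 * 16 == 8640 the packed layout holds the same bytes per slice, so %2 = arith.muli %0, %c55296000 sizes the whole ?x540x3200x16x1 result.

ELEM = 2  # sizeof(f16) in bytes
assert 8640 * 3200 * ELEM == 55_296_000          # input resource size
assert 540 * 3200 * 16 * 1 * ELEM == 55_296_000  # one packed slice: 540 * 16 == 8640

def result_bytes(d: int) -> int:
    return d * 55_296_000  # mirrors %2 = arith.muli %0, %c55296000

print(result_bytes(4))  # 221184000
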
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
// (function unchanged; identical to the SCFToControlFlow dump above)
// -----// IR Dump After CSE (cse) //----- //
// (function unchanged)
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
// (function unchanged)
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- //
// (module identical to the FoldGlobals dump above except that the module op now
// carries iree.fixedpoint.iteration = 0 : index, the fixed-point driver's counter)
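
That attribute is bookkeeping for the fixed-point driver, which reruns this cleanup pipeline (ApplyPatterns, FoldGlobals, FuseGlobals, IPO, ...) until a full sweep changes nothing. A hedged sketch of that driver shape, illustrative names only:

def run_to_fixed_point(module: str, pipeline, max_iters: int = 10) -> str:
    # Rerun the whole pipeline until one complete sweep makes no change.
    for _ in range(max_iters):
        before = module
        for run_pass in pipeline:
            module = run_pass(module)
        if module == before:
            return module
    raise RuntimeError("did not converge")

# Toy usage: a "pass" that folds one constant expression per sweep.
fold = lambda m: m.replace("2*2", "4", 1)
print(run_to_fixed_point("x = 2*2 + 2*2", [fold]))  # x = 4 + 4
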
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- //
// (module unchanged)
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- //
// (module unchanged)
// -----// IR Dump After IPO (iree-util-ipo) //----- //
// (module unchanged)
// -----// IR Dump After ElideTimepointsPass (iree-stream-elide-timepoints) //----- //
// (module unchanged; the single alloca -> execute -> await chain leaves no
// redundant timepoint to elide)
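
For intuition about what ElideTimepointsPass looks for: timepoints chain the asynchronous steps here (stream.resource.alloca signals %result_timepoint, stream.cmd.execute awaits it and signals %3, and the host awaits %3 before exporting), and an await on a timepoint that is provably already reached can be dropped. An asyncio analogy with illustrative names, not IREE API:

import asyncio

async def schedule() -> None:
    result_timepoint = asyncio.Event()  # signaled by stream.resource.alloca
    execute_done = asyncio.Event()      # signaled by stream.cmd.execute (%3)

    async def alloca() -> None:
        result_timepoint.set()

    async def execute() -> None:
        await result_timepoint.wait()   # stream.cmd.execute await(...)
        execute_done.set()

    await asyncio.gather(alloca(), execute())
    await execute_done.wait()           # stream.timepoint.await %3

asyncio.run(schedule())
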
// -----// IR Dump After FixedPointIterator (iree-util-fixed-point-iterator) //----- //
// (module unchanged except that the driver drops its iree.fixedpoint.iteration
// attribute; the cleanup pipeline made no further changes, so iteration stopped)
// -----// IR Dump After FuseDispatchBindingsPass (iree-stream-fuse-dispatch-bindings) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
stream.executable private @broadcast_pack_kernel_dispatch_0 { | |
stream.executable.export public @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: index) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%arg2] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg4, 0 : index | |
%2 = stream.binding.subspan %arg1[%arg3] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x540x3200x16x1xf16> | |
%5 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%6 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%5 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %6 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%1, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
return | |
} | |
} | |
} | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%c55296000 = arith.constant 55296000 : index | |
%c0 = arith.constant 0 : index | |
%c3200 = arith.constant 3200 : index | |
%c8640 = arith.constant 8640 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%2 = arith.muli %0, %c55296000 : index | |
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%2} => !stream.timepoint | |
%c0_0 = arith.constant 0 : index | |
%3 = stream.cmd.execute await(%result_timepoint) => with(%1 as %arg2: !stream.resource<external>{%c55296000}, %result as %arg3: !stream.resource<external>{%2}) { | |
stream.cmd.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%c0, %c0, %0 : index, index, index) { | |
ro %arg2[%c0_0 for %c55296000] : !stream.resource<external>{%c55296000}, | |
wo %arg3[%c0_0 for %2] : !stream.resource<external>{%2} | |
} | |
} => !stream.timepoint | |
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%2} | |
%5 = stream.tensor.export %4 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%2} -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
} | |
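// Note on the change above: FuseDispatchBindingsPass rewrote the dispatch
// signature so each binding's byte offset travels as an explicit operand
// (%arg2/%arg3, both fed %c0 at the single dispatch site) rather than being
// baked into the region, which lets later passes alias several logical
// buffers onto one physical binding. The %c55296000 byte size also checks
// out: 8640 * 3200 * 2 (f16) = 55,296,000, and one packed 540x3200x16x1
// slice is 540 * 3200 * 16 * 1 * 2 = 55,296,000 as well, hence the
// arith.muli of the dynamic dim by %c55296000 for the result allocation.
// A minimal Python sketch of the offset-as-operand addressing (illustrative
// only, not the IREE runtime API; sizes are the real ones from the IR):

F16 = 2
LHS_BYTES = 8640 * 3200 * F16            # 55_296_000, matches %c55296000

def subspan(binding, offset, length):
    """Rough stream.binding.subspan analogue: a byte window into a binding."""
    return memoryview(binding)[offset:offset + length]

def dispatch(binding0, binding1, off0, off1, d0):
    lhs = subspan(binding0, off0, LHS_BYTES)        # ro 8640x3200xf16
    out = subspan(binding1, off1, d0 * LHS_BYTES)   # wo Dx540x3200x16x1xf16
    # Stand-in for the broadcast along the outer dim; the pack relayout is omitted.
    out[:] = bytes(lhs) * d0

buf_in = bytes(LHS_BYTES)
buf_out = bytearray(3 * LHS_BYTES)
dispatch(buf_in, buf_out, 0, 0, 3)       # offsets arrive as plain operands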
// -----// IR Dump After AnnotateDispatchArgumentsPass (iree-stream-annotate-dispatch-arguments) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
stream.executable private @broadcast_pack_kernel_dispatch_0 { | |
stream.executable.export public @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: index {stream.values = [0 : index]}, %arg3: index {stream.values = [0 : index]}, %arg4: index) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%arg2] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg4, 0 : index | |
%2 = stream.binding.subspan %arg1[%arg3] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x540x3200x16x1xf16> | |
%5 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%6 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%5 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %6 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%1, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
return | |
} | |
} | |
} | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%c55296000 = arith.constant 55296000 : index | |
%c0 = arith.constant 0 : index | |
%c3200 = arith.constant 3200 : index | |
%c8640 = arith.constant 8640 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%2 = arith.muli %0, %c55296000 : index | |
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%2} => !stream.timepoint | |
%c0_0 = arith.constant 0 : index | |
%3 = stream.cmd.execute await(%result_timepoint) => with(%1 as %arg2: !stream.resource<external>{%c55296000}, %result as %arg3: !stream.resource<external>{%2}) { | |
stream.cmd.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%c0, %c0, %0 : index, index, index) { | |
ro %arg2[%c0_0 for %c55296000] : !stream.resource<external>{%c55296000}, | |
wo %arg3[%c0_0 for %2] : !stream.resource<external>{%2} | |
} | |
} => !stream.timepoint | |
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%2} | |
%5 = stream.tensor.export %4 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%2} -> !hal.buffer_view | |
util.return %5 : !hal.buffer_view | |
} | |
} | |
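// AnnotateDispatchArgumentsPass changed only the attributes: the bindings
// gained stream.alignment = 64 and the two offset operands gained
// stream.values = [0 : index], because the sole dispatch site passes %c0 for
// both. A minimal sketch of that per-operand site analysis (illustrative,
// not the actual pass implementation):

from collections import defaultdict

def annotate_operands(sites, max_set=4):
    """sites: one tuple per dispatch site; None marks a value that is not a
    known constant at that site (e.g. the dynamic dim)."""
    seen = defaultdict(set)
    for operands in sites:
        for i, v in enumerate(operands):
            seen[i].add(v)
    # A small, fully known value set becomes a stream.values-style hint.
    return {i: sorted(v) for i, v in seen.items()
            if None not in v and len(v) <= max_set}

# The one site above passes (0, 0, D) with D dynamic:
print(annotate_operands([(0, 0, None)]))   # {0: [0], 1: [0]}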
// -----// IR Dump After PackDispatchOperandsPass (iree-stream-pack-dispatch-operands) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
stream.executable private @broadcast_pack_kernel_dispatch_0 { | |
stream.executable.export public @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: i32, %arg3: i32, %arg4: i32, %arg5: i32, %arg6: i32, %arg7: i32) { | |
%0 = arith.extui %arg2 : i32 to i64 | |
%1 = arith.extui %arg3 : i32 to i64 | |
%c32_i64 = arith.constant 32 : i64 | |
%2 = arith.shli %1, %c32_i64 : i64 | |
%3 = arith.ori %0, %2 : i64 | |
%4 = arith.index_castui %3 {stream.values = [0 : index]} : i64 to index | |
%5 = arith.extui %arg4 : i32 to i64 | |
%6 = arith.extui %arg5 : i32 to i64 | |
%c32_i64_0 = arith.constant 32 : i64 | |
%7 = arith.shli %6, %c32_i64_0 : i64 | |
%8 = arith.ori %5, %7 : i64 | |
%9 = arith.index_castui %8 {stream.values = [0 : index]} : i64 to index | |
%10 = arith.extui %arg6 : i32 to i64 | |
%11 = arith.extui %arg7 : i32 to i64 | |
%c32_i64_1 = arith.constant 32 : i64 | |
%12 = arith.shli %11, %c32_i64_1 : i64 | |
%13 = arith.ori %10, %12 : i64 | |
%14 = arith.index_castui %13 : i64 to index | |
%c0 = arith.constant 0 : index | |
%15 = stream.binding.subspan %arg0[%4] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%16 = flow.dispatch.workload.ordinal %14, 0 : index | |
%17 = stream.binding.subspan %arg1[%9] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%16} | |
%18 = flow.dispatch.tensor.load %15, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%19 = tensor.empty(%16) : tensor<?x540x3200x16x1xf16> | |
%20 = tensor.empty(%16) : tensor<?x8640x3200xf16> | |
%21 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%18 : tensor<8640x3200xf16>) outs(%20 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %21 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %19 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %17, offsets = [0, 0, 0, 0, 0], sizes = [%16, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%16} | |
return | |
} | |
} | |
} | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%c55296000 = arith.constant 55296000 : index | |
%c0 = arith.constant 0 : index | |
%c3200 = arith.constant 3200 : index | |
%c8640 = arith.constant 8640 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%2 = arith.muli %0, %c55296000 : index | |
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%2} => !stream.timepoint | |
%c0_0 = arith.constant 0 : index | |
%c0_i64 = arith.constant 0 : i64 | |
%c0_i32 = arith.constant 0 : i32 | |
%c32_i64 = arith.constant 32 : i64 | |
%c0_i64_1 = arith.constant 0 : i64 | |
%c0_i32_2 = arith.constant 0 : i32 | |
%c0_i64_3 = arith.constant 0 : i64 | |
%c0_i32_4 = arith.constant 0 : i32 | |
%c32_i64_5 = arith.constant 32 : i64 | |
%c0_i64_6 = arith.constant 0 : i64 | |
%c0_i32_7 = arith.constant 0 : i32 | |
%3 = arith.index_castui %0 : index to i64 | |
%4 = arith.trunci %3 : i64 to i32 | |
%c32_i64_8 = arith.constant 32 : i64 | |
%5 = arith.shrui %3, %c32_i64_8 : i64 | |
%6 = arith.trunci %5 : i64 to i32 | |
%7 = stream.cmd.execute await(%result_timepoint) => with(%1 as %arg2: !stream.resource<external>{%c55296000}, %result as %arg3: !stream.resource<external>{%2}) { | |
stream.cmd.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%c0_i32, %c0_i32_2, %c0_i32_4, %c0_i32_7, %4, %6 : i32, i32, i32, i32, i32, i32) { | |
ro %arg2[%c0_0 for %c55296000] : !stream.resource<external>{%c55296000}, | |
wo %arg3[%c0_0 for %2] : !stream.resource<external>{%2} | |
} | |
} => !stream.timepoint | |
%8 = stream.timepoint.await %7 => %result : !stream.resource<external>{%2} | |
%9 = stream.tensor.export %8 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%2} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
} | |
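// PackDispatchOperandsPass lowers the dispatch ABI to i32 words: every index
// operand is split on the host (index_castui + trunci + shrui) and rebuilt in
// the callee (extui + shli + ori). The two zero offsets now occupy four i32
// slots and the dynamic dim the last two. A round-trip check of that packing
// in Python (names illustrative):

MASK32 = 0xFFFF_FFFF

def split_i64(v):
    return v & MASK32, (v >> 32) & MASK32         # trunci / shrui + trunci

def join_i64(lo, hi):
    return (lo & MASK32) | ((hi & MASK32) << 32)  # extui / shli / ori

for v in (0, 1, 54_321, 2**32 - 1, 2**32, 2**63 - 1):
    assert join_i64(*split_i64(v)) == v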
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%c32_i64 = arith.constant 32 : i64 | |
%c0_i32 = arith.constant 0 : i32 | |
%c55296000 = arith.constant 55296000 : index | |
%c0 = arith.constant 0 : index | |
%c3200 = arith.constant 3200 : index | |
%c8640 = arith.constant 8640 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%2 = arith.muli %0, %c55296000 : index | |
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%2} => !stream.timepoint | |
%3 = arith.index_castui %0 : index to i64 | |
%4 = arith.trunci %3 : i64 to i32 | |
%5 = arith.shrui %3, %c32_i64 : i64 | |
%6 = arith.trunci %5 : i64 to i32 | |
%7 = stream.cmd.execute await(%result_timepoint) => with(%1 as %arg2: !stream.resource<external>{%c55296000}, %result as %arg3: !stream.resource<external>{%2}) { | |
stream.cmd.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%c0_i32, %c0_i32, %c0_i32, %c0_i32, %4, %6 : i32, i32, i32, i32, i32, i32) { | |
ro %arg2[%c0 for %c55296000] : !stream.resource<external>{%c55296000}, | |
wo %arg3[%c0 for %2] : !stream.resource<external>{%2} | |
} | |
} => !stream.timepoint | |
%8 = stream.timepoint.await %7 => %result : !stream.resource<external>{%2} | |
%9 = stream.tensor.export %8 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%2} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
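// The canonicalizer swept the host side: the dead zero i64/i32 constants are
// gone and the four zero operand words now share a single %c0_i32. The same
// fold will apply in the callee, since the zero-offset packing chain
// extui(0) | shli(extui(0), 32) is just 0; a one-line check:

assert (0 | (0 << 32)) == 0   # the zero-offset packing arithmetic folds away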
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%c32_i64 = arith.constant 32 : i64 | |
%c0_i32 = arith.constant 0 : i32 | |
%c55296000 = arith.constant 55296000 : index | |
%c0 = arith.constant 0 : index | |
%c3200 = arith.constant 3200 : index | |
%c8640 = arith.constant 8640 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%2 = arith.muli %0, %c55296000 : index | |
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%2} => !stream.timepoint | |
%3 = arith.index_castui %0 : index to i64 | |
%4 = arith.trunci %3 : i64 to i32 | |
%5 = arith.shrui %3, %c32_i64 : i64 | |
%6 = arith.trunci %5 : i64 to i32 | |
%7 = stream.cmd.execute await(%result_timepoint) => with(%1 as %arg2: !stream.resource<external>{%c55296000}, %result as %arg3: !stream.resource<external>{%2}) { | |
stream.cmd.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%c0_i32, %c0_i32, %c0_i32, %c0_i32, %4, %6 : i32, i32, i32, i32, i32, i32) { | |
ro %arg2[%c0 for %c55296000] : !stream.resource<external>{%c55296000}, | |
wo %arg3[%c0 for %2] : !stream.resource<external>{%2} | |
} | |
} => !stream.timepoint | |
%8 = stream.timepoint.await %7 => %result : !stream.resource<external>{%2} | |
%9 = stream.tensor.export %8 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%2} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- // | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%c32_i64 = arith.constant 32 : i64 | |
%c0_i32 = arith.constant 0 : i32 | |
%c55296000 = arith.constant 55296000 : index | |
%c0 = arith.constant 0 : index | |
%c3200 = arith.constant 3200 : index | |
%c8640 = arith.constant 8640 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%2 = arith.muli %0, %c55296000 : index | |
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%2} => !stream.timepoint | |
%3 = arith.index_castui %0 : index to i64 | |
%4 = arith.trunci %3 : i64 to i32 | |
%5 = arith.shrui %3, %c32_i64 : i64 | |
%6 = arith.trunci %5 : i64 to i32 | |
%7 = stream.cmd.execute await(%result_timepoint) => with(%1 as %arg2: !stream.resource<external>{%c55296000}, %result as %arg3: !stream.resource<external>{%2}) { | |
stream.cmd.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%c0_i32, %c0_i32, %c0_i32, %c0_i32, %4, %6 : i32, i32, i32, i32, i32, i32) { | |
ro %arg2[%c0 for %c55296000] : !stream.resource<external>{%c55296000}, | |
wo %arg3[%c0 for %2] : !stream.resource<external>{%2} | |
} | |
} => !stream.timepoint | |
%8 = stream.timepoint.await %7 => %result : !stream.resource<external>{%2} | |
%9 = stream.tensor.export %8 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%2} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
stream.executable private @broadcast_pack_kernel_dispatch_0 { | |
stream.executable.export public @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: i32, %arg3: i32, %arg4: i32, %arg5: i32, %arg6: i32, %arg7: i32) { | |
%c32_i64 = arith.constant 32 : i64 | |
%0 = arith.extui %arg2 : i32 to i64 | |
%1 = arith.extui %arg3 : i32 to i64 | |
%2 = arith.shli %1, %c32_i64 : i64 | |
%3 = arith.ori %0, %2 : i64 | |
%4 = arith.index_castui %3 {stream.values = [0 : index]} : i64 to index | |
%5 = arith.extui %arg4 : i32 to i64 | |
%6 = arith.extui %arg5 : i32 to i64 | |
%7 = arith.shli %6, %c32_i64 : i64 | |
%8 = arith.ori %5, %7 : i64 | |
%9 = arith.index_castui %8 {stream.values = [0 : index]} : i64 to index | |
%10 = arith.extui %arg6 : i32 to i64 | |
%11 = arith.extui %arg7 : i32 to i64 | |
%12 = arith.shli %11, %c32_i64 : i64 | |
%13 = arith.ori %10, %12 : i64 | |
%14 = arith.index_castui %13 : i64 to index | |
%15 = stream.binding.subspan %arg0[%4] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%16 = flow.dispatch.workload.ordinal %14, 0 : index | |
%17 = stream.binding.subspan %arg1[%9] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%16} | |
%18 = flow.dispatch.tensor.load %15, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%19 = tensor.empty(%16) : tensor<?x540x3200x16x1xf16> | |
%20 = tensor.empty(%16) : tensor<?x8640x3200xf16> | |
%21 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%18 : tensor<8640x3200xf16>) outs(%20 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %21 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %19 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %17, offsets = [0, 0, 0, 0, 0], sizes = [%16, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%16} | |
return | |
} | |
} | |
} | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%c32_i64 = arith.constant 32 : i64 | |
%c0_i32 = arith.constant 0 : i32 | |
%c55296000 = arith.constant 55296000 : index | |
%c0 = arith.constant 0 : index | |
%c3200 = arith.constant 3200 : index | |
%c8640 = arith.constant 8640 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%2 = arith.muli %0, %c55296000 : index | |
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%2} => !stream.timepoint | |
%3 = arith.index_castui %0 : index to i64 | |
%4 = arith.trunci %3 : i64 to i32 | |
%5 = arith.shrui %3, %c32_i64 : i64 | |
%6 = arith.trunci %5 : i64 to i32 | |
%7 = stream.cmd.execute await(%result_timepoint) => with(%1 as %arg2: !stream.resource<external>{%c55296000}, %result as %arg3: !stream.resource<external>{%2}) { | |
stream.cmd.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%c0_i32, %c0_i32, %c0_i32, %c0_i32, %4, %6 : i32, i32, i32, i32, i32, i32) { | |
ro %arg2[%c0 for %c55296000] : !stream.resource<external>{%c55296000}, | |
wo %arg3[%c0 for %2] : !stream.resource<external>{%2} | |
} | |
} => !stream.timepoint | |
%8 = stream.timepoint.await %7 => %result : !stream.resource<external>{%2} | |
%9 = stream.tensor.export %8 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%2} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
} | |
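// Relative to the dump after PackDispatchOperandsPass, iree-util-apply-patterns
// tidied the callee: the three identical `arith.constant 32 : i64` definitions
// (%c32_i64, %c32_i64_0, %c32_i64_1) merged into one hoisted constant. A toy
// value-numbering sketch of that de-duplication (not the MLIR pass itself):

def dedup_constants(ops):
    """ops: list of ("const", value) or ("op", name, operand_ids)."""
    canon, out, remap = {}, [], {}
    for i, op in enumerate(ops):
        if op[0] == "const":
            if op[1] in canon:
                remap[i] = canon[op[1]]       # reuse the first definition
                continue
            canon[op[1]] = len(out)
        remap[i] = len(out)
        out.append(op if op[0] == "const"
                   else (op[0], op[1], tuple(remap[j] for j in op[2])))
    return out

ops = [("const", 32), ("op", "shli", (0,)), ("const", 32), ("op", "shli", (2,))]
print(dedup_constants(ops))   # both shli ops now use constant #0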
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
stream.executable private @broadcast_pack_kernel_dispatch_0 { | |
stream.executable.export public @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: i32, %arg3: i32, %arg4: i32, %arg5: i32, %arg6: i32, %arg7: i32) { | |
%c32_i64 = arith.constant 32 : i64 | |
%0 = arith.extui %arg2 : i32 to i64 | |
%1 = arith.extui %arg3 : i32 to i64 | |
%2 = arith.shli %1, %c32_i64 : i64 | |
%3 = arith.ori %0, %2 : i64 | |
%4 = arith.index_castui %3 {stream.values = [0 : index]} : i64 to index | |
%5 = arith.extui %arg4 : i32 to i64 | |
%6 = arith.extui %arg5 : i32 to i64 | |
%7 = arith.shli %6, %c32_i64 : i64 | |
%8 = arith.ori %5, %7 : i64 | |
%9 = arith.index_castui %8 {stream.values = [0 : index]} : i64 to index | |
%10 = arith.extui %arg6 : i32 to i64 | |
%11 = arith.extui %arg7 : i32 to i64 | |
%12 = arith.shli %11, %c32_i64 : i64 | |
%13 = arith.ori %10, %12 : i64 | |
%14 = arith.index_castui %13 : i64 to index | |
%15 = stream.binding.subspan %arg0[%4] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%16 = flow.dispatch.workload.ordinal %14, 0 : index | |
%17 = stream.binding.subspan %arg1[%9] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%16} | |
%18 = flow.dispatch.tensor.load %15, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%19 = tensor.empty(%16) : tensor<?x540x3200x16x1xf16> | |
%20 = tensor.empty(%16) : tensor<?x8640x3200xf16> | |
%21 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%18 : tensor<8640x3200xf16>) outs(%20 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %21 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %19 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %17, offsets = [0, 0, 0, 0, 0], sizes = [%16, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%16} | |
return | |
} | |
} | |
} | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%c32_i64 = arith.constant 32 : i64 | |
%c0_i32 = arith.constant 0 : i32 | |
%c55296000 = arith.constant 55296000 : index | |
%c0 = arith.constant 0 : index | |
%c3200 = arith.constant 3200 : index | |
%c8640 = arith.constant 8640 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%2 = arith.muli %0, %c55296000 : index | |
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%2} => !stream.timepoint | |
%3 = arith.index_castui %0 : index to i64 | |
%4 = arith.trunci %3 : i64 to i32 | |
%5 = arith.shrui %3, %c32_i64 : i64 | |
%6 = arith.trunci %5 : i64 to i32 | |
%7 = stream.cmd.execute await(%result_timepoint) => with(%1 as %arg2: !stream.resource<external>{%c55296000}, %result as %arg3: !stream.resource<external>{%2}) { | |
stream.cmd.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%c0_i32, %c0_i32, %c0_i32, %c0_i32, %4, %6 : i32, i32, i32, i32, i32, i32) { | |
ro %arg2[%c0 for %c55296000] : !stream.resource<external>{%c55296000}, | |
wo %arg3[%c0 for %2] : !stream.resource<external>{%2} | |
} | |
} => !stream.timepoint | |
%8 = stream.timepoint.await %7 => %result : !stream.resource<external>{%2} | |
%9 = stream.tensor.export %8 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%2} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
stream.executable private @broadcast_pack_kernel_dispatch_0 { | |
stream.executable.export public @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: i32, %arg3: i32, %arg4: i32, %arg5: i32, %arg6: i32, %arg7: i32) { | |
%c32_i64 = arith.constant 32 : i64 | |
%0 = arith.extui %arg2 : i32 to i64 | |
%1 = arith.extui %arg3 : i32 to i64 | |
%2 = arith.shli %1, %c32_i64 : i64 | |
%3 = arith.ori %0, %2 : i64 | |
%4 = arith.index_castui %3 {stream.values = [0 : index]} : i64 to index | |
%5 = arith.extui %arg4 : i32 to i64 | |
%6 = arith.extui %arg5 : i32 to i64 | |
%7 = arith.shli %6, %c32_i64 : i64 | |
%8 = arith.ori %5, %7 : i64 | |
%9 = arith.index_castui %8 {stream.values = [0 : index]} : i64 to index | |
%10 = arith.extui %arg6 : i32 to i64 | |
%11 = arith.extui %arg7 : i32 to i64 | |
%12 = arith.shli %11, %c32_i64 : i64 | |
%13 = arith.ori %10, %12 : i64 | |
%14 = arith.index_castui %13 : i64 to index | |
%15 = stream.binding.subspan %arg0[%4] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%16 = flow.dispatch.workload.ordinal %14, 0 : index | |
%17 = stream.binding.subspan %arg1[%9] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%16} | |
%18 = flow.dispatch.tensor.load %15, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%19 = tensor.empty(%16) : tensor<?x540x3200x16x1xf16> | |
%20 = tensor.empty(%16) : tensor<?x8640x3200xf16> | |
%21 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%18 : tensor<8640x3200xf16>) outs(%20 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %21 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %19 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %17, offsets = [0, 0, 0, 0, 0], sizes = [%16, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%16} | |
return | |
} | |
} | |
} | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%c32_i64 = arith.constant 32 : i64 | |
%c0_i32 = arith.constant 0 : i32 | |
%c55296000 = arith.constant 55296000 : index | |
%c0 = arith.constant 0 : index | |
%c3200 = arith.constant 3200 : index | |
%c8640 = arith.constant 8640 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%2 = arith.muli %0, %c55296000 : index | |
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%2} => !stream.timepoint | |
%3 = arith.index_castui %0 : index to i64 | |
%4 = arith.trunci %3 : i64 to i32 | |
%5 = arith.shrui %3, %c32_i64 : i64 | |
%6 = arith.trunci %5 : i64 to i32 | |
%7 = stream.cmd.execute await(%result_timepoint) => with(%1 as %arg2: !stream.resource<external>{%c55296000}, %result as %arg3: !stream.resource<external>{%2}) { | |
stream.cmd.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%c0_i32, %c0_i32, %c0_i32, %c0_i32, %4, %6 : i32, i32, i32, i32, i32, i32) { | |
ro %arg2[%c0 for %c55296000] : !stream.resource<external>{%c55296000}, | |
wo %arg3[%c0 for %2] : !stream.resource<external>{%2} | |
} | |
} => !stream.timepoint | |
%8 = stream.timepoint.await %7 => %result : !stream.resource<external>{%2} | |
%9 = stream.tensor.export %8 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%2} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After IPO (iree-util-ipo) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
stream.executable private @broadcast_pack_kernel_dispatch_0 { | |
stream.executable.export public @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: i32, %arg3: i32, %arg4: i32, %arg5: i32, %arg6: i32, %arg7: i32) { | |
%c32_i64 = arith.constant 32 : i64 | |
%0 = arith.extui %arg2 : i32 to i64 | |
%1 = arith.extui %arg3 : i32 to i64 | |
%2 = arith.shli %1, %c32_i64 : i64 | |
%3 = arith.ori %0, %2 : i64 | |
%4 = arith.index_castui %3 {stream.values = [0 : index]} : i64 to index | |
%5 = arith.extui %arg4 : i32 to i64 | |
%6 = arith.extui %arg5 : i32 to i64 | |
%7 = arith.shli %6, %c32_i64 : i64 | |
%8 = arith.ori %5, %7 : i64 | |
%9 = arith.index_castui %8 {stream.values = [0 : index]} : i64 to index | |
%10 = arith.extui %arg6 : i32 to i64 | |
%11 = arith.extui %arg7 : i32 to i64 | |
%12 = arith.shli %11, %c32_i64 : i64 | |
%13 = arith.ori %10, %12 : i64 | |
%14 = arith.index_castui %13 : i64 to index | |
%15 = stream.binding.subspan %arg0[%4] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%16 = flow.dispatch.workload.ordinal %14, 0 : index | |
%17 = stream.binding.subspan %arg1[%9] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%16} | |
%18 = flow.dispatch.tensor.load %15, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%19 = tensor.empty(%16) : tensor<?x540x3200x16x1xf16> | |
%20 = tensor.empty(%16) : tensor<?x8640x3200xf16> | |
%21 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%18 : tensor<8640x3200xf16>) outs(%20 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %21 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %19 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %17, offsets = [0, 0, 0, 0, 0], sizes = [%16, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%16} | |
return | |
} | |
} | |
} | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%c32_i64 = arith.constant 32 : i64 | |
%c0_i32 = arith.constant 0 : i32 | |
%c55296000 = arith.constant 55296000 : index | |
%c0 = arith.constant 0 : index | |
%c3200 = arith.constant 3200 : index | |
%c8640 = arith.constant 8640 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%2 = arith.muli %0, %c55296000 : index | |
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%2} => !stream.timepoint | |
%3 = arith.index_castui %0 : index to i64 | |
%4 = arith.trunci %3 : i64 to i32 | |
%5 = arith.shrui %3, %c32_i64 : i64 | |
%6 = arith.trunci %5 : i64 to i32 | |
%7 = stream.cmd.execute await(%result_timepoint) => with(%1 as %arg2: !stream.resource<external>{%c55296000}, %result as %arg3: !stream.resource<external>{%2}) { | |
stream.cmd.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%c0_i32, %c0_i32, %c0_i32, %c0_i32, %4, %6 : i32, i32, i32, i32, i32, i32) { | |
ro %arg2[%c0 for %c55296000] : !stream.resource<external>{%c55296000}, | |
wo %arg3[%c0 for %2] : !stream.resource<external>{%2} | |
} | |
} => !stream.timepoint | |
%8 = stream.timepoint.await %7 => %result : !stream.resource<external>{%2} | |
%9 = stream.tensor.export %8 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%2} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After FoldUniformOperandsPass (iree-stream-fold-uniform-operands) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
stream.executable private @broadcast_pack_kernel_dispatch_0 { | |
stream.executable.export public @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: i32, %arg3: i32) { | |
%c0_i32 = arith.constant 0 : i32 | |
%c32_i64 = arith.constant 32 : i64 | |
%0 = arith.extui %c0_i32 : i32 to i64 | |
%1 = arith.extui %c0_i32 : i32 to i64 | |
%2 = arith.shli %1, %c32_i64 : i64 | |
%3 = arith.ori %0, %2 : i64 | |
%4 = arith.index_castui %3 {stream.values = [0 : index]} : i64 to index | |
%5 = arith.extui %c0_i32 : i32 to i64 | |
%6 = arith.extui %c0_i32 : i32 to i64 | |
%7 = arith.shli %6, %c32_i64 : i64 | |
%8 = arith.ori %5, %7 : i64 | |
%9 = arith.index_castui %8 {stream.values = [0 : index]} : i64 to index | |
%10 = arith.extui %arg2 : i32 to i64 | |
%11 = arith.extui %arg3 : i32 to i64 | |
%12 = arith.shli %11, %c32_i64 : i64 | |
%13 = arith.ori %10, %12 : i64 | |
%14 = arith.index_castui %13 : i64 to index | |
%15 = stream.binding.subspan %arg0[%4] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%16 = flow.dispatch.workload.ordinal %14, 0 : index | |
%17 = stream.binding.subspan %arg1[%9] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%16} | |
%18 = flow.dispatch.tensor.load %15, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%19 = tensor.empty(%16) : tensor<?x540x3200x16x1xf16> | |
%20 = tensor.empty(%16) : tensor<?x8640x3200xf16> | |
%21 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%18 : tensor<8640x3200xf16>) outs(%20 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %21 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %19 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %17, offsets = [0, 0, 0, 0, 0], sizes = [%16, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%16} | |
return | |
} | |
} | |
} | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%c32_i64 = arith.constant 32 : i64 | |
%c0_i32 = arith.constant 0 : i32 | |
%c55296000 = arith.constant 55296000 : index | |
%c0 = arith.constant 0 : index | |
%c3200 = arith.constant 3200 : index | |
%c8640 = arith.constant 8640 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%2 = arith.muli %0, %c55296000 : index | |
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%2} => !stream.timepoint | |
%3 = arith.index_castui %0 : index to i64 | |
%4 = arith.trunci %3 : i64 to i32 | |
%5 = arith.shrui %3, %c32_i64 : i64 | |
%6 = arith.trunci %5 : i64 to i32 | |
%7 = stream.cmd.execute await(%result_timepoint) => with(%1 as %arg2: !stream.resource<external>{%c55296000}, %result as %arg3: !stream.resource<external>{%2}) { | |
stream.cmd.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%4, %6 : i32, i32) { | |
ro %arg2[%c0 for %c55296000] : !stream.resource<external>{%c55296000}, | |
wo %arg3[%c0 for %2] : !stream.resource<external>{%2} | |
} | |
} => !stream.timepoint | |
%8 = stream.timepoint.await %7 => %result : !stream.resource<external>{%2} | |
%9 = stream.tensor.export %8 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%2} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
} | |
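// FoldUniformOperandsPass noticed that across all dispatch sites (one here)
// the first four i32 operands are always %c0_i32, so it dropped them from the
// callee signature and materialized the zero constant inside; only the two
// words of the dynamic dim survive as operands. A minimal sketch of that
// uniform-operand folding (illustrative, not the IREE implementation):

def fold_uniform(sites):
    """sites: one operand tuple per dispatch site; None = not a known constant.
    Returns (baked-in constants by operand index, surviving operand indices)."""
    n = len(sites[0])
    uniform = {}
    for i in range(n):
        vals = {site[i] for site in sites}
        if len(vals) == 1 and None not in vals:
            uniform[i] = vals.pop()
    keep = [i for i in range(n) if i not in uniform]
    return uniform, keep

# One site: (0, 0, 0, 0, lo_word, hi_word) with the last two dynamic.
print(fold_uniform([(0, 0, 0, 0, None, None)]))
# -> ({0: 0, 1: 0, 2: 0, 3: 0}, [4, 5]): six operands shrink to two, as above.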
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%c32_i64 = arith.constant 32 : i64 | |
%c55296000 = arith.constant 55296000 : index | |
%c0 = arith.constant 0 : index | |
%c3200 = arith.constant 3200 : index | |
%c8640 = arith.constant 8640 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%2 = arith.muli %0, %c55296000 : index | |
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%2} => !stream.timepoint | |
%3 = arith.index_castui %0 : index to i64 | |
%4 = arith.trunci %3 : i64 to i32 | |
%5 = arith.shrui %3, %c32_i64 : i64 | |
%6 = arith.trunci %5 : i64 to i32 | |
%7 = stream.cmd.execute await(%result_timepoint) => with(%1 as %arg2: !stream.resource<external>{%c55296000}, %result as %arg3: !stream.resource<external>{%2}) { | |
stream.cmd.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%4, %6 : i32, i32) { | |
ro %arg2[%c0 for %c55296000] : !stream.resource<external>{%c55296000}, | |
wo %arg3[%c0 for %2] : !stream.resource<external>{%2} | |
} | |
} => !stream.timepoint | |
%8 = stream.timepoint.await %7 => %result : !stream.resource<external>{%2} | |
%9 = stream.tensor.export %8 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%2} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%c32_i64 = arith.constant 32 : i64 | |
%c55296000 = arith.constant 55296000 : index | |
%c0 = arith.constant 0 : index | |
%c3200 = arith.constant 3200 : index | |
%c8640 = arith.constant 8640 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%2 = arith.muli %0, %c55296000 : index | |
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%2} => !stream.timepoint | |
%3 = arith.index_castui %0 : index to i64 | |
%4 = arith.trunci %3 : i64 to i32 | |
%5 = arith.shrui %3, %c32_i64 : i64 | |
%6 = arith.trunci %5 : i64 to i32 | |
%7 = stream.cmd.execute await(%result_timepoint) => with(%1 as %arg2: !stream.resource<external>{%c55296000}, %result as %arg3: !stream.resource<external>{%2}) { | |
stream.cmd.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%4, %6 : i32, i32) { | |
ro %arg2[%c0 for %c55296000] : !stream.resource<external>{%c55296000}, | |
wo %arg3[%c0 for %2] : !stream.resource<external>{%2} | |
} | |
} => !stream.timepoint | |
%8 = stream.timepoint.await %7 => %result : !stream.resource<external>{%2} | |
%9 = stream.tensor.export %8 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%2} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
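(Note: %c55296000 is both the byte size of the f16 input, 8640 x 3200 x 2 bytes, and of one batch slice of the packed result, 540 x 3200 x 16 x 1 x 2 bytes, since 8640 = 540 * 16; that is why the result allocation is simply %2 = arith.muli %0, %c55296000. A quick arithmetic check in Python:)

bytes_f16 = 2
input_bytes = 8640 * 3200 * bytes_f16            # tensor<8640x3200xf16>
slice_bytes = 540 * 3200 * 16 * 1 * bytes_f16    # one ?-slice of tensor<?x540x3200x16x1xf16>
assert input_bytes == slice_bytes == 55_296_000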
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- // | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%c32_i64 = arith.constant 32 : i64 | |
%c55296000 = arith.constant 55296000 : index | |
%c0 = arith.constant 0 : index | |
%c3200 = arith.constant 3200 : index | |
%c8640 = arith.constant 8640 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%2 = arith.muli %0, %c55296000 : index | |
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%2} => !stream.timepoint | |
%3 = arith.index_castui %0 : index to i64 | |
%4 = arith.trunci %3 : i64 to i32 | |
%5 = arith.shrui %3, %c32_i64 : i64 | |
%6 = arith.trunci %5 : i64 to i32 | |
%7 = stream.cmd.execute await(%result_timepoint) => with(%1 as %arg2: !stream.resource<external>{%c55296000}, %result as %arg3: !stream.resource<external>{%2}) { | |
stream.cmd.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%4, %6 : i32, i32) { | |
ro %arg2[%c0 for %c55296000] : !stream.resource<external>{%c55296000}, | |
wo %arg3[%c0 for %2] : !stream.resource<external>{%2} | |
} | |
} => !stream.timepoint | |
%8 = stream.timepoint.await %7 => %result : !stream.resource<external>{%2} | |
%9 = stream.tensor.export %8 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%2} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
stream.executable private @broadcast_pack_kernel_dispatch_0 { | |
stream.executable.export public @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: i32, %arg3: i32) { | |
%c32_i64 = arith.constant 32 : i64 | |
%c0 = arith.constant 0 : index | |
%0 = arith.extui %arg2 : i32 to i64 | |
%1 = arith.extui %arg3 : i32 to i64 | |
%2 = arith.shli %1, %c32_i64 : i64 | |
%3 = arith.ori %0, %2 : i64 | |
%4 = arith.index_castui %3 : i64 to index | |
%5 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%6 = flow.dispatch.workload.ordinal %4, 0 : index | |
%7 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6} | |
%8 = flow.dispatch.tensor.load %5, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%9 = tensor.empty(%6) : tensor<?x540x3200x16x1xf16> | |
%10 = tensor.empty(%6) : tensor<?x8640x3200xf16> | |
%11 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%8 : tensor<8640x3200xf16>) outs(%10 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %11 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %9 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %7, offsets = [0, 0, 0, 0, 0], sizes = [%6, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6} | |
return | |
} | |
} | |
} | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%c32_i64 = arith.constant 32 : i64 | |
%c55296000 = arith.constant 55296000 : index | |
%c0 = arith.constant 0 : index | |
%c3200 = arith.constant 3200 : index | |
%c8640 = arith.constant 8640 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%2 = arith.muli %0, %c55296000 : index | |
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%2} => !stream.timepoint | |
%3 = arith.index_castui %0 : index to i64 | |
%4 = arith.trunci %3 : i64 to i32 | |
%5 = arith.shrui %3, %c32_i64 : i64 | |
%6 = arith.trunci %5 : i64 to i32 | |
%7 = stream.cmd.execute await(%result_timepoint) => with(%1 as %arg2: !stream.resource<external>{%c55296000}, %result as %arg3: !stream.resource<external>{%2}) { | |
stream.cmd.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%4, %6 : i32, i32) { | |
ro %arg2[%c0 for %c55296000] : !stream.resource<external>{%c55296000}, | |
wo %arg3[%c0 for %2] : !stream.resource<external>{%2} | |
} | |
} => !stream.timepoint | |
%8 = stream.timepoint.await %7 => %result : !stream.resource<external>{%2} | |
%9 = stream.tensor.export %8 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%2} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
} | |
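(Note: the dispatch body above pairs a broadcast -- the linalg.generic maps (d0, d1, d2) -> (d1, d2) over the 8640x3200 input -- with a tensor.pack that tiles dims [1, 2] by inner_tiles [16, 1], turning ?x8640x3200 into ?x540x3200x16x1. A NumPy sketch of the same data movement, shapes only; this assumes NumPy and is not the IREE runtime:)

import numpy as np

def broadcast_pack(w, d):
    # broadcast: (8640, 3200) -> (d, 8640, 3200), as in the linalg.generic
    x = np.broadcast_to(w, (d,) + w.shape)
    # pack: split dim 1 into (540, 16) and dim 2 into (3200, 1), then move
    # the inner tile dims innermost, as tensor.pack with inner_tiles = [16, 1] does
    return x.reshape(d, 540, 16, 3200, 1).transpose(0, 1, 3, 2, 4)

w = np.zeros((8640, 3200), dtype=np.float16)
assert broadcast_pack(w, 2).shape == (2, 540, 3200, 16, 1)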
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
stream.executable private @broadcast_pack_kernel_dispatch_0 { | |
stream.executable.export public @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: i32, %arg3: i32) { | |
%c32_i64 = arith.constant 32 : i64 | |
%c0 = arith.constant 0 : index | |
%0 = arith.extui %arg2 : i32 to i64 | |
%1 = arith.extui %arg3 : i32 to i64 | |
%2 = arith.shli %1, %c32_i64 : i64 | |
%3 = arith.ori %0, %2 : i64 | |
%4 = arith.index_castui %3 : i64 to index | |
%5 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%6 = flow.dispatch.workload.ordinal %4, 0 : index | |
%7 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6} | |
%8 = flow.dispatch.tensor.load %5, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%9 = tensor.empty(%6) : tensor<?x540x3200x16x1xf16> | |
%10 = tensor.empty(%6) : tensor<?x8640x3200xf16> | |
%11 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%8 : tensor<8640x3200xf16>) outs(%10 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %11 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %9 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %7, offsets = [0, 0, 0, 0, 0], sizes = [%6, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6} | |
return | |
} | |
} | |
} | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%c32_i64 = arith.constant 32 : i64 | |
%c55296000 = arith.constant 55296000 : index | |
%c0 = arith.constant 0 : index | |
%c3200 = arith.constant 3200 : index | |
%c8640 = arith.constant 8640 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%2 = arith.muli %0, %c55296000 : index | |
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%2} => !stream.timepoint | |
%3 = arith.index_castui %0 : index to i64 | |
%4 = arith.trunci %3 : i64 to i32 | |
%5 = arith.shrui %3, %c32_i64 : i64 | |
%6 = arith.trunci %5 : i64 to i32 | |
%7 = stream.cmd.execute await(%result_timepoint) => with(%1 as %arg2: !stream.resource<external>{%c55296000}, %result as %arg3: !stream.resource<external>{%2}) { | |
stream.cmd.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%4, %6 : i32, i32) { | |
ro %arg2[%c0 for %c55296000] : !stream.resource<external>{%c55296000}, | |
wo %arg3[%c0 for %2] : !stream.resource<external>{%2} | |
} | |
} => !stream.timepoint | |
%8 = stream.timepoint.await %7 => %result : !stream.resource<external>{%2} | |
%9 = stream.tensor.export %8 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%2} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
stream.executable private @broadcast_pack_kernel_dispatch_0 { | |
stream.executable.export public @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: i32, %arg3: i32) { | |
%c32_i64 = arith.constant 32 : i64 | |
%c0 = arith.constant 0 : index | |
%0 = arith.extui %arg2 : i32 to i64 | |
%1 = arith.extui %arg3 : i32 to i64 | |
%2 = arith.shli %1, %c32_i64 : i64 | |
%3 = arith.ori %0, %2 : i64 | |
%4 = arith.index_castui %3 : i64 to index | |
%5 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%6 = flow.dispatch.workload.ordinal %4, 0 : index | |
%7 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6} | |
%8 = flow.dispatch.tensor.load %5, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%9 = tensor.empty(%6) : tensor<?x540x3200x16x1xf16> | |
%10 = tensor.empty(%6) : tensor<?x8640x3200xf16> | |
%11 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%8 : tensor<8640x3200xf16>) outs(%10 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %11 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %9 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %7, offsets = [0, 0, 0, 0, 0], sizes = [%6, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6} | |
return | |
} | |
} | |
} | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%c32_i64 = arith.constant 32 : i64 | |
%c55296000 = arith.constant 55296000 : index | |
%c0 = arith.constant 0 : index | |
%c3200 = arith.constant 3200 : index | |
%c8640 = arith.constant 8640 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%2 = arith.muli %0, %c55296000 : index | |
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%2} => !stream.timepoint | |
%3 = arith.index_castui %0 : index to i64 | |
%4 = arith.trunci %3 : i64 to i32 | |
%5 = arith.shrui %3, %c32_i64 : i64 | |
%6 = arith.trunci %5 : i64 to i32 | |
%7 = stream.cmd.execute await(%result_timepoint) => with(%1 as %arg2: !stream.resource<external>{%c55296000}, %result as %arg3: !stream.resource<external>{%2}) { | |
stream.cmd.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%4, %6 : i32, i32) { | |
ro %arg2[%c0 for %c55296000] : !stream.resource<external>{%c55296000}, | |
wo %arg3[%c0 for %2] : !stream.resource<external>{%2} | |
} | |
} => !stream.timepoint | |
%8 = stream.timepoint.await %7 => %result : !stream.resource<external>{%2} | |
%9 = stream.tensor.export %8 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%2} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
} | |
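(Note: execution in these dumps is fully asynchronous: stream.resource.alloca yields the resource plus a timepoint, stream.cmd.execute awaits that timepoint before the dispatch runs, and only the final stream.timepoint.await synchronizes the host. A loose host-side analogy using plain Python futures -- purely illustrative, none of this is IREE API:)

from concurrent.futures import ThreadPoolExecutor

pool = ThreadPoolExecutor()
alloc_done = pool.submit(lambda: bytearray(55_296_000))  # ~ stream.resource.alloca => !stream.timepoint

def run_dispatch():
    buf = alloc_done.result()   # ~ stream.cmd.execute await(%result_timepoint)
    return buf                  # ~ dispatch writes into %result

exec_done = pool.submit(run_dispatch)   # ~ %7 = stream.cmd.execute ... => !stream.timepoint
result = exec_done.result()             # ~ %8 = stream.timepoint.await %7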
// -----// IR Dump After IPO (iree-util-ipo) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
stream.executable private @broadcast_pack_kernel_dispatch_0 { | |
stream.executable.export public @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: i32, %arg3: i32) { | |
%c32_i64 = arith.constant 32 : i64 | |
%c0 = arith.constant 0 : index | |
%0 = arith.extui %arg2 : i32 to i64 | |
%1 = arith.extui %arg3 : i32 to i64 | |
%2 = arith.shli %1, %c32_i64 : i64 | |
%3 = arith.ori %0, %2 : i64 | |
%4 = arith.index_castui %3 : i64 to index | |
%5 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%6 = flow.dispatch.workload.ordinal %4, 0 : index | |
%7 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6} | |
%8 = flow.dispatch.tensor.load %5, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%9 = tensor.empty(%6) : tensor<?x540x3200x16x1xf16> | |
%10 = tensor.empty(%6) : tensor<?x8640x3200xf16> | |
%11 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%8 : tensor<8640x3200xf16>) outs(%10 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %11 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %9 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %7, offsets = [0, 0, 0, 0, 0], sizes = [%6, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6} | |
return | |
} | |
} | |
} | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%c32_i64 = arith.constant 32 : i64 | |
%c55296000 = arith.constant 55296000 : index | |
%c0 = arith.constant 0 : index | |
%c3200 = arith.constant 3200 : index | |
%c8640 = arith.constant 8640 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%2 = arith.muli %0, %c55296000 : index | |
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%2} => !stream.timepoint | |
%3 = arith.index_castui %0 : index to i64 | |
%4 = arith.trunci %3 : i64 to i32 | |
%5 = arith.shrui %3, %c32_i64 : i64 | |
%6 = arith.trunci %5 : i64 to i32 | |
%7 = stream.cmd.execute await(%result_timepoint) => with(%1 as %arg2: !stream.resource<external>{%c55296000}, %result as %arg3: !stream.resource<external>{%2}) { | |
stream.cmd.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%4, %6 : i32, i32) { | |
ro %arg2[%c0 for %c55296000] : !stream.resource<external>{%c55296000}, | |
wo %arg3[%c0 for %2] : !stream.resource<external>{%2} | |
} | |
} => !stream.timepoint | |
%8 = stream.timepoint.await %7 => %result : !stream.resource<external>{%2} | |
%9 = stream.tensor.export %8 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%2} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After SymbolDCE (symbol-dce) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
stream.executable private @broadcast_pack_kernel_dispatch_0 { | |
stream.executable.export public @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: i32, %arg3: i32) { | |
%c32_i64 = arith.constant 32 : i64 | |
%c0 = arith.constant 0 : index | |
%0 = arith.extui %arg2 : i32 to i64 | |
%1 = arith.extui %arg3 : i32 to i64 | |
%2 = arith.shli %1, %c32_i64 : i64 | |
%3 = arith.ori %0, %2 : i64 | |
%4 = arith.index_castui %3 : i64 to index | |
%5 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%6 = flow.dispatch.workload.ordinal %4, 0 : index | |
%7 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6} | |
%8 = flow.dispatch.tensor.load %5, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%9 = tensor.empty(%6) : tensor<?x540x3200x16x1xf16> | |
%10 = tensor.empty(%6) : tensor<?x8640x3200xf16> | |
%11 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%8 : tensor<8640x3200xf16>) outs(%10 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %11 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %9 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %7, offsets = [0, 0, 0, 0, 0], sizes = [%6, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6} | |
return | |
} | |
} | |
} | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%c32_i64 = arith.constant 32 : i64 | |
%c55296000 = arith.constant 55296000 : index | |
%c0 = arith.constant 0 : index | |
%c3200 = arith.constant 3200 : index | |
%c8640 = arith.constant 8640 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%2 = arith.muli %0, %c55296000 : index | |
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%2} => !stream.timepoint | |
%3 = arith.index_castui %0 : index to i64 | |
%4 = arith.trunci %3 : i64 to i32 | |
%5 = arith.shrui %3, %c32_i64 : i64 | |
%6 = arith.trunci %5 : i64 to i32 | |
%7 = stream.cmd.execute await(%result_timepoint) => with(%1 as %arg2: !stream.resource<external>{%c55296000}, %result as %arg3: !stream.resource<external>{%2}) { | |
stream.cmd.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%4, %6 : i32, i32) { | |
ro %arg2[%c0 for %c55296000] : !stream.resource<external>{%c55296000}, | |
wo %arg3[%c0 for %2] : !stream.resource<external>{%2} | |
} | |
} => !stream.timepoint | |
%8 = stream.timepoint.await %7 => %result : !stream.resource<external>{%2} | |
%9 = stream.tensor.export %8 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%2} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
stream.executable private @broadcast_pack_kernel_dispatch_0 { | |
stream.executable.export public @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: i32, %arg3: i32) { | |
%c32_i64 = arith.constant 32 : i64 | |
%c0 = arith.constant 0 : index | |
%0 = arith.extui %arg2 : i32 to i64 | |
%1 = arith.extui %arg3 : i32 to i64 | |
%2 = arith.shli %1, %c32_i64 : i64 | |
%3 = arith.ori %0, %2 : i64 | |
%4 = arith.index_castui %3 : i64 to index | |
%5 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%6 = flow.dispatch.workload.ordinal %4, 0 : index | |
%7 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6} | |
%8 = flow.dispatch.tensor.load %5, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%9 = tensor.empty(%6) : tensor<?x540x3200x16x1xf16> | |
%10 = tensor.empty(%6) : tensor<?x8640x3200xf16> | |
%11 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%8 : tensor<8640x3200xf16>) outs(%10 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %11 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %9 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %7, offsets = [0, 0, 0, 0, 0], sizes = [%6, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6} | |
return | |
} | |
} | |
} | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%c32_i64 = arith.constant 32 : i64 | |
%c55296000 = arith.constant 55296000 : index | |
%c0 = arith.constant 0 : index | |
%c3200 = arith.constant 3200 : index | |
%c8640 = arith.constant 8640 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%2 = arith.muli %0, %c55296000 : index | |
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%2} => !stream.timepoint | |
%3 = arith.index_castui %0 : index to i64 | |
%4 = arith.trunci %3 : i64 to i32 | |
%5 = arith.shrui %3, %c32_i64 : i64 | |
%6 = arith.trunci %5 : i64 to i32 | |
%7 = stream.cmd.execute await(%result_timepoint) => with(%1 as %arg2: !stream.resource<external>{%c55296000}, %result as %arg3: !stream.resource<external>{%2}) { | |
stream.cmd.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%4, %6 : i32, i32) { | |
ro %arg2[%c0 for %c55296000] : !stream.resource<external>{%c55296000}, | |
wo %arg3[%c0 for %2] : !stream.resource<external>{%2} | |
} | |
} => !stream.timepoint | |
%8 = stream.timepoint.await %7 => %result : !stream.resource<external>{%2} | |
%9 = stream.tensor.export %8 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%2} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
stream.executable private @broadcast_pack_kernel_dispatch_0 { | |
stream.executable.export public @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: i32, %arg3: i32) { | |
%c32_i64 = arith.constant 32 : i64 | |
%c0 = arith.constant 0 : index | |
%0 = arith.extui %arg2 : i32 to i64 | |
%1 = arith.extui %arg3 : i32 to i64 | |
%2 = arith.shli %1, %c32_i64 : i64 | |
%3 = arith.ori %0, %2 : i64 | |
%4 = arith.index_castui %3 : i64 to index | |
%5 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%6 = flow.dispatch.workload.ordinal %4, 0 : index | |
%7 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6} | |
%8 = flow.dispatch.tensor.load %5, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%9 = tensor.empty(%6) : tensor<?x540x3200x16x1xf16> | |
%10 = tensor.empty(%6) : tensor<?x8640x3200xf16> | |
%11 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%8 : tensor<8640x3200xf16>) outs(%10 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %11 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %9 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %7, offsets = [0, 0, 0, 0, 0], sizes = [%6, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6} | |
return | |
} | |
} | |
} | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%c32_i64 = arith.constant 32 : i64 | |
%c55296000 = arith.constant 55296000 : index | |
%c0 = arith.constant 0 : index | |
%c3200 = arith.constant 3200 : index | |
%c8640 = arith.constant 8640 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%2 = arith.muli %0, %c55296000 : index | |
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%2} => !stream.timepoint | |
%3 = arith.index_castui %0 : index to i64 | |
%4 = arith.trunci %3 : i64 to i32 | |
%5 = arith.shrui %3, %c32_i64 : i64 | |
%6 = arith.trunci %5 : i64 to i32 | |
%7 = stream.cmd.execute await(%result_timepoint) => with(%1 as %arg2: !stream.resource<external>{%c55296000}, %result as %arg3: !stream.resource<external>{%2}) { | |
stream.cmd.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%4, %6 : i32, i32) { | |
ro %arg2[%c0 for %c55296000] : !stream.resource<external>{%c55296000}, | |
wo %arg3[%c0 for %2] : !stream.resource<external>{%2} | |
} | |
} => !stream.timepoint | |
%8 = stream.timepoint.await %7 => %result : !stream.resource<external>{%2} | |
%9 = stream.tensor.export %8 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%2} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
stream.executable private @broadcast_pack_kernel_dispatch_0 { | |
stream.executable.export public @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: i32, %arg3: i32) { | |
%c32_i64 = arith.constant 32 : i64 | |
%c0 = arith.constant 0 : index | |
%0 = arith.extui %arg2 : i32 to i64 | |
%1 = arith.extui %arg3 : i32 to i64 | |
%2 = arith.shli %1, %c32_i64 : i64 | |
%3 = arith.ori %0, %2 : i64 | |
%4 = arith.index_castui %3 : i64 to index | |
%5 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%6 = flow.dispatch.workload.ordinal %4, 0 : index | |
%7 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6} | |
%8 = flow.dispatch.tensor.load %5, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%9 = tensor.empty(%6) : tensor<?x540x3200x16x1xf16> | |
%10 = tensor.empty(%6) : tensor<?x8640x3200xf16> | |
%11 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%8 : tensor<8640x3200xf16>) outs(%10 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %11 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %9 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %7, offsets = [0, 0, 0, 0, 0], sizes = [%6, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6} | |
return | |
} | |
} | |
} | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%c32_i64 = arith.constant 32 : i64 | |
%c55296000 = arith.constant 55296000 : index | |
%c0 = arith.constant 0 : index | |
%c3200 = arith.constant 3200 : index | |
%c8640 = arith.constant 8640 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%2 = arith.muli %0, %c55296000 : index | |
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%2} => !stream.timepoint | |
%3 = arith.index_castui %0 : index to i64 | |
%4 = arith.trunci %3 : i64 to i32 | |
%5 = arith.shrui %3, %c32_i64 : i64 | |
%6 = arith.trunci %5 : i64 to i32 | |
%7 = stream.cmd.execute await(%result_timepoint) => with(%1 as %arg2: !stream.resource<external>{%c55296000}, %result as %arg3: !stream.resource<external>{%2}) { | |
stream.cmd.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%4, %6 : i32, i32) { | |
ro %arg2[%c0 for %c55296000] : !stream.resource<external>{%c55296000}, | |
wo %arg3[%c0 for %2] : !stream.resource<external>{%2} | |
} | |
} => !stream.timepoint | |
%8 = stream.timepoint.await %7 => %result : !stream.resource<external>{%2} | |
%9 = stream.tensor.export %8 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%2} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- // | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%c32_i64 = arith.constant 32 : i64 | |
%c55296000 = arith.constant 55296000 : index | |
%c0 = arith.constant 0 : index | |
%c3200 = arith.constant 3200 : index | |
%c8640 = arith.constant 8640 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%2 = arith.muli %0, %c55296000 : index | |
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%2} => !stream.timepoint | |
%3 = arith.index_castui %0 : index to i64 | |
%4 = arith.trunci %3 : i64 to i32 | |
%5 = arith.shrui %3, %c32_i64 : i64 | |
%6 = arith.trunci %5 : i64 to i32 | |
%7 = stream.cmd.execute await(%result_timepoint) => with(%1 as %arg2: !stream.resource<external>{%c55296000}, %result as %arg3: !stream.resource<external>{%2}) { | |
stream.cmd.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%4, %6 : i32, i32) { | |
ro %arg2[%c0 for %c55296000] : !stream.resource<external>{%c55296000}, | |
wo %arg3[%c0 for %2] : !stream.resource<external>{%2} | |
} | |
} => !stream.timepoint | |
%8 = stream.timepoint.await %7 => %result : !stream.resource<external>{%2} | |
%9 = stream.tensor.export %8 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%2} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
stream.executable private @broadcast_pack_kernel_dispatch_0 { | |
stream.executable.export public @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: i32, %arg3: i32) { | |
%c32_i64 = arith.constant 32 : i64 | |
%c0 = arith.constant 0 : index | |
%0 = arith.extui %arg2 : i32 to i64 | |
%1 = arith.extui %arg3 : i32 to i64 | |
%2 = arith.shli %1, %c32_i64 : i64 | |
%3 = arith.ori %0, %2 : i64 | |
%4 = arith.index_castui %3 : i64 to index | |
%5 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%6 = flow.dispatch.workload.ordinal %4, 0 : index | |
%7 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6} | |
%8 = flow.dispatch.tensor.load %5, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%9 = tensor.empty(%6) : tensor<?x540x3200x16x1xf16> | |
%10 = tensor.empty(%6) : tensor<?x8640x3200xf16> | |
%11 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%8 : tensor<8640x3200xf16>) outs(%10 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %11 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %9 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %7, offsets = [0, 0, 0, 0, 0], sizes = [%6, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6} | |
return | |
} | |
} | |
} | |
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} { | |
%c32_i64 = arith.constant 32 : i64 | |
%c55296000 = arith.constant 55296000 : index | |
%c0 = arith.constant 0 : index | |
%c3200 = arith.constant 3200 : index | |
%c8640 = arith.constant 8640 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%2 = arith.muli %0, %c55296000 : index | |
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%2} => !stream.timepoint | |
%3 = arith.index_castui %0 : index to i64 | |
%4 = arith.trunci %3 : i64 to i32 | |
%5 = arith.shrui %3, %c32_i64 : i64 | |
%6 = arith.trunci %5 : i64 to i32 | |
%7 = stream.cmd.execute await(%result_timepoint) => with(%1 as %arg2: !stream.resource<external>{%c55296000}, %result as %arg3: !stream.resource<external>{%2}) { | |
stream.cmd.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%4, %6 : i32, i32) { | |
ro %arg2[%c0 for %c55296000] : !stream.resource<external>{%c55296000}, | |
wo %arg3[%c0 for %2] : !stream.resource<external>{%2} | |
} | |
} => !stream.timepoint | |
%8 = stream.timepoint.await %7 => %result : !stream.resource<external>{%2} | |
%9 = stream.tensor.export %8 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%2} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
  stream.executable private @broadcast_pack_kernel_dispatch_0 {
    stream.executable.export public @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) {
      %x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
      stream.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: i32, %arg3: i32) {
        %c32_i64 = arith.constant 32 : i64
        %c0 = arith.constant 0 : index
        %0 = arith.extui %arg2 : i32 to i64
        %1 = arith.extui %arg3 : i32 to i64
        %2 = arith.shli %1, %c32_i64 : i64
        %3 = arith.ori %0, %2 : i64
        %4 = arith.index_castui %3 : i64 to index
        %5 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>>
        %6 = flow.dispatch.workload.ordinal %4, 0 : index
        %7 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6}
        %8 = flow.dispatch.tensor.load %5, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16>
        %9 = tensor.empty(%6) : tensor<?x540x3200x16x1xf16>
        %10 = tensor.empty(%6) : tensor<?x8640x3200xf16>
        %11 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%8 : tensor<8640x3200xf16>) outs(%10 : tensor<?x8640x3200xf16>) {
        ^bb0(%in: f16, %out: f16):
          linalg.yield %in : f16
        } -> tensor<?x8640x3200xf16>
        %pack = tensor.pack %11 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %9 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
        flow.dispatch.tensor.store %pack, %7, offsets = [0, 0, 0, 0, 0], sizes = [%6, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6}
        return
      }
    }
  }
  util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
    %c32_i64 = arith.constant 32 : i64
    %c55296000 = arith.constant 55296000 : index
    %c0 = arith.constant 0 : index
    %c3200 = arith.constant 3200 : index
    %c8640 = arith.constant 8640 : index
    %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
    %element_type_f16 = hal.element_type<f16> : i32
    %dense_row_major = hal.encoding_type<dense_row_major> : i32
    hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
    %1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000}
    %2 = arith.muli %0, %c55296000 : index
    %result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%2} => !stream.timepoint
    %3 = arith.index_castui %0 : index to i64
    %4 = arith.trunci %3 : i64 to i32
    %5 = arith.shrui %3, %c32_i64 : i64
    %6 = arith.trunci %5 : i64 to i32
    %7 = stream.cmd.execute await(%result_timepoint) => with(%1 as %arg2: !stream.resource<external>{%c55296000}, %result as %arg3: !stream.resource<external>{%2}) {
      stream.cmd.dispatch @broadcast_pack_kernel_dispatch_0::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%4, %6 : i32, i32) {
        ro %arg2[%c0 for %c55296000] : !stream.resource<external>{%c55296000},
        wo %arg3[%c0 for %2] : !stream.resource<external>{%2}
      }
    } => !stream.timepoint
    %8 = stream.timepoint.await %7 => %result : !stream.resource<external>{%2}
    %9 = stream.tensor.export %8 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%2} -> !hal.buffer_view
    util.return %9 : !hal.buffer_view
  }
}
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- //
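// FuseGlobals merges globals that are always loaded and stored together; with
// no util.global ops present the module after this pass is byte-for-byte
// identical to the FoldGlobals dump above, so the duplicate is not repeated.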
// -----// IR Dump After AssignTargetDevicesPass (iree-hal-assign-target-devices) //----- //
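// AssignTargetDevicesPass runs again at the start of the HAL pipeline; the
// hal.device.targets attribute is already set on the module, so the IR is
// unchanged (duplicate dump not repeated).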
// -----// IR Dump After VerifyTargetEnvironmentPass (iree-hal-verify-target-environment) //----- //
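// VerifyTargetEnvironmentPass only checks that a valid #hal.device.target is
// attached before HAL conversion begins; it does not modify IR, and its dump
// is identical to the one above (duplicate not repeated).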
// -----// IR Dump After MaterializeInterfacesPass (iree-hal-materialize-interfaces) //----- //
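// First structural change in the HAL pipeline: the stream.executable becomes a
// hal.executable with one variant per target (@embedded_elf_x86_64 here). A
// #hal.pipeline.layout with 2 push constants and two storage-buffer bindings
// (binding 0 the read-only source, binding 1 the destination) is materialized,
// and the dispatch function now reads its operands via
// hal.interface.constant.load and hal.interface.binding.subspan instead of
// !stream.binding arguments.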
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#pipeline_layout = #hal.pipeline.layout<push_constants = 2, sets = [<0, bindings = [<0, storage_buffer, ReadOnly>, <1, storage_buffer>]>]>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
  hal.executable private @broadcast_pack_kernel_dispatch_0 {
    hal.executable.variant public @embedded_elf_x86_64 target(#executable_target_embedded_elf_x86_64_) {
      hal.executable.export public @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack ordinal(0) layout(#pipeline_layout) attributes {hal.interface.bindings = [#hal.interface.binding<0, 0>, #hal.interface.binding<0, 1>]} {
      ^bb0(%arg0: !hal.device, %arg1: index):
        %x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg1
        hal.return %x, %y, %z : index, index, index
      }
      builtin.module {
        func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack() {
          %c0 = arith.constant 0 : index
          %c32_i64 = arith.constant 32 : i64
          %0 = hal.interface.constant.load[0] : i32
          %1 = hal.interface.constant.load[1] : i32
          %2 = arith.extui %0 : i32 to i64
          %3 = arith.extui %1 : i32 to i64
          %4 = arith.shli %3, %c32_i64 : i64
          %5 = arith.ori %2, %4 : i64
          %6 = arith.index_castui %5 : i64 to index
          %7 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>>
          %8 = flow.dispatch.workload.ordinal %6, 0 : index
          %9 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%8}
          %10 = flow.dispatch.tensor.load %7, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16>
          %11 = tensor.empty(%8) : tensor<?x540x3200x16x1xf16>
          %12 = tensor.empty(%8) : tensor<?x8640x3200xf16>
          %13 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%10 : tensor<8640x3200xf16>) outs(%12 : tensor<?x8640x3200xf16>) {
          ^bb0(%in: f16, %out: f16):
            linalg.yield %in : f16
          } -> tensor<?x8640x3200xf16>
          %pack = tensor.pack %13 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %11 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
          flow.dispatch.tensor.store %pack, %9, offsets = [0, 0, 0, 0, 0], sizes = [%8, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%8}
          return
        }
      }
    }
  }
  util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
    %c32_i64 = arith.constant 32 : i64
    %c55296000 = arith.constant 55296000 : index
    %c0 = arith.constant 0 : index
    %c3200 = arith.constant 3200 : index
    %c8640 = arith.constant 8640 : index
    %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
    %element_type_f16 = hal.element_type<f16> : i32
    %dense_row_major = hal.encoding_type<dense_row_major> : i32
    hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
    %1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000}
    %2 = arith.muli %0, %c55296000 : index
    %result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%2} => !stream.timepoint
    %3 = arith.index_castui %0 : index to i64
    %4 = arith.trunci %3 : i64 to i32
    %5 = arith.shrui %3, %c32_i64 : i64
    %6 = arith.trunci %5 : i64 to i32
    %7 = stream.cmd.execute await(%result_timepoint) => with(%1 as %arg2: !stream.resource<external>{%c55296000}, %result as %arg3: !stream.resource<external>{%2}) {
      stream.cmd.dispatch @broadcast_pack_kernel_dispatch_0::@embedded_elf_x86_64::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%4, %6 : i32, i32) {
        ro %arg2[%c0 for %c55296000] : !stream.resource<external>{%c55296000},
        wo %arg3[%c0 for %2] : !stream.resource<external>{%2}
      }
    } => !stream.timepoint
    %8 = stream.timepoint.await %7 => %result : !stream.resource<external>{%2}
    %9 = stream.tensor.export %8 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%2} -> !hal.buffer_view
    util.return %9 : !hal.buffer_view
  }
}
// -----// IR Dump After PruneExecutablesPass (iree-hal-prune-executables) //----- //
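// PruneExecutablesPass drops executables, variants, and exports with no
// remaining references; everything here is used by the dispatch, so the module
// is identical to the MaterializeInterfacesPass dump above (duplicate not
// repeated).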
// -----// IR Dump After CPUMaterializeUpperBoundTileSize (iree-codegen-cpu-materialize-upper-bound-tile-size) //----- //
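// From here the log narrows its scope: host-side and dispatch-side IR are
// dumped separately. This pass resolves upper-bound tile-size queries used to
// size allocations for data-tiled encodings; the host function below contains
// none, so it is printed unchanged.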
util.func public @broadcast_pack_kernel(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @broadcast_pack_kernel(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x540x3200x16x1xf16>)"}} {
  %c32_i64 = arith.constant 32 : i64
  %c55296000 = arith.constant 55296000 : index
  %c0 = arith.constant 0 : index
  %c3200 = arith.constant 3200 : index
  %c8640 = arith.constant 8640 : index
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %element_type_f16 = hal.element_type<f16> : i32
  %dense_row_major = hal.encoding_type<dense_row_major> : i32
  hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
  %1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000}
  %2 = arith.muli %0, %c55296000 : index
  %result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%2} => !stream.timepoint
  %3 = arith.index_castui %0 : index to i64
  %4 = arith.trunci %3 : i64 to i32
  %5 = arith.shrui %3, %c32_i64 : i64
  %6 = arith.trunci %5 : i64 to i32
  %7 = stream.cmd.execute await(%result_timepoint) => with(%1 as %arg2: !stream.resource<external>{%c55296000}, %result as %arg3: !stream.resource<external>{%2}) {
    stream.cmd.dispatch @broadcast_pack_kernel_dispatch_0::@embedded_elf_x86_64::@broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack[%0](%4, %6 : i32, i32) {
      ro %arg2[%c0 for %c55296000] : !stream.resource<external>{%c55296000},
      wo %arg3[%c0 for %2] : !stream.resource<external>{%2}
    }
  } => !stream.timepoint
  %8 = stream.timepoint.await %7 => %result : !stream.resource<external>{%2}
  %9 = stream.tensor.export %8 : tensor<?x540x3200x16x1xf16>{%0} in !stream.resource<external>{%2} -> !hal.buffer_view
  util.return %9 : !hal.buffer_view
}
// -----// IR Dump After TypePropagation (iree-codegen-type-propagation) //----- //
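// First dump inside the codegen pipeline, showing only the dispatch function:
// the two 32-bit push constants are recombined into the dynamic batch size %8,
// the f16 source is loaded, broadcast along the batch dimension by the
// linalg.generic, and packed into the ?x540x3200x16x1 layout. TypePropagation
// itself legalizes unsupported element types (e.g. sub-byte integers); all
// types here are already legal, so it changes nothing.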
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack() {
  %c0 = arith.constant 0 : index
  %c32_i64 = arith.constant 32 : i64
  %0 = hal.interface.constant.load[0] : i32
  %1 = hal.interface.constant.load[1] : i32
  %2 = arith.extui %0 : i32 to i64
  %3 = arith.extui %1 : i32 to i64
  %4 = arith.shli %3, %c32_i64 : i64
  %5 = arith.ori %2, %4 : i64
  %6 = arith.index_castui %5 : i64 to index
  %7 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>>
  %8 = flow.dispatch.workload.ordinal %6, 0 : index
  %9 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%8}
  %10 = flow.dispatch.tensor.load %7, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16>
  %11 = tensor.empty(%8) : tensor<?x540x3200x16x1xf16>
  %12 = tensor.empty(%8) : tensor<?x8640x3200xf16>
  %13 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%10 : tensor<8640x3200xf16>) outs(%12 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %pack = tensor.pack %13 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %11 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
  flow.dispatch.tensor.store %pack, %9, offsets = [0, 0, 0, 0, 0], sizes = [%8, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%8}
  return
}
// -----// IR Dump After BubbleUpOrdinalOps (iree-codegen-bubble-up-ordinal-ops) //----- //
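// BubbleUpOrdinalOps moves flow.dispatch.workload.ordinal ops next to the
// index casts producing their operands; %6 already feeds the ordinal directly,
// so the function is unchanged (duplicate dump not repeated).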
// -----// IR Dump After BufferizeCopyOnlyDispatches (iree-codegen-bufferize-copy-only-dispatches) //----- //
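// BufferizeCopyOnlyDispatches early-bufferizes dispatches that are pure
// copies; this dispatch computes a broadcast plus pack, so it is skipped and
// the function is unchanged (duplicate dump not repeated).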
// -----// IR Dump After DecomposeSoftmax (iree-codegen-decompose-softmax) //----- //
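// DecomposeSoftmax splits linalg.softmax into primitive linalg ops; there is
// no softmax here, so the function is unchanged (duplicate dump not repeated).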
// -----// IR Dump After MaterializeUserConfigs (iree-codegen-materialize-user-configs) //----- //
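// MaterializeUserConfigs would apply user-supplied translation_info /
// lowering_config overrides; none were provided, so the function is unchanged
// apart from being printed inside its enclosing builtin.module (duplicate dump
// not repeated).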
// -----// IR Dump After RematerializeParallelOps (iree-codegen-rematerialize-parallel-ops) //----- //
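// RematerializeParallelOps re-fuses cheap parallel elementwise producers into
// their consumers to avoid materializing intermediates; it finds nothing to do
// here and the function is unchanged (duplicate dump not repeated).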
// -----// IR Dump After ExpandF16OpToF32 (iree-llvmcpu-expand-f16-op-to-f32) //----- //
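// ExpandF16OpToF32 rewrites f16 arithmetic as f32 compute wrapped in
// conversions for targets without native f16 math; the only f16 op here is a
// pass-through linalg.yield, so nothing is expanded (duplicate dump not
// repeated).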
// -----// IR Dump After CPUMaterializeEncoding (iree-codegen-cpu-materialize-encoding) //----- //
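// CPUMaterializeEncoding lowers abstract tensor encodings into concrete packed
// layouts; this dispatch carries no encoding attributes (its tensor.pack is
// already explicit), so the function is unchanged (duplicate dump not
// repeated).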
// -----// IR Dump After EraseHALDescriptorTypeFromMemRef (iree-codegen-erase-hal-descriptor-type-from-memref) //----- //
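// EraseHALDescriptorTypeFromMemRef strips the #hal.descriptor_type memory
// space from memref types; this function still operates on tensors, so there
// is nothing to erase (duplicate dump not repeated).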
// -----// IR Dump After LLVMCPUSelectLoweringStrategy (iree-llvmcpu-select-lowering-strategy) //----- //
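// First pass to actually change the dispatch IR: it selects the
// CPUDoubleTilingExpert pipeline (translation_info on the function) and
// attaches lowering_config tile sizes: [64, 64, 64] distribution and
// [1, 16, 1] vector tiles for the broadcast linalg.generic, and [64, 4, 64] /
// [1, 1, 1] for the tensor.pack.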
module {
  func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack() attributes {translation_info = #iree_codegen.translation_info<CPUDoubleTilingExpert>} {
    %c0 = arith.constant 0 : index
    %c32_i64 = arith.constant 32 : i64
    %0 = hal.interface.constant.load[0] : i32
    %1 = hal.interface.constant.load[1] : i32
    %2 = arith.extui %0 : i32 to i64
    %3 = arith.extui %1 : i32 to i64
    %4 = arith.shli %3, %c32_i64 : i64
    %5 = arith.ori %2, %4 : i64
    %6 = arith.index_castui %5 : i64 to index
    %7 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>>
    %8 = flow.dispatch.workload.ordinal %6, 0 : index
    %9 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%8}
    %10 = flow.dispatch.tensor.load %7, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16>
    %11 = tensor.empty(%8) : tensor<?x540x3200x16x1xf16>
    %12 = tensor.empty(%8) : tensor<?x8640x3200xf16>
    %13 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%10 : tensor<8640x3200xf16>) outs(%12 : tensor<?x8640x3200xf16>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64, 64], [1, 16, 1], [0, 0, 0], [0, 0, 0]]>} {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %pack = tensor.pack %13 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %11 {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 4, 64], [1, 1, 1], [0, 0, 0], [0, 0, 0]]>} : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
    flow.dispatch.tensor.store %pack, %9, offsets = [0, 0, 0, 0, 0], sizes = [%8, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%8}
    return
  }
}
// -----// IR Dump After ConfigureTargetExecutableVariantsPass (iree-hal-configure-target-executable-variants) //----- //
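// The same configuration now lands inside the hal.executable.variant; note
// that the target attribute is printed inline below rather than through the
// #executable_target alias used in earlier dumps.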
hal.executable.variant public @embedded_elf_x86_64 target(<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>) { | |
hal.executable.export public @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack ordinal(0) layout(#hal.pipeline.layout<push_constants = 2, sets = [<0, bindings = [<0, storage_buffer, ReadOnly>, <1, storage_buffer>]>]>) attributes {hal.interface.bindings = [#hal.interface.binding<0, 0>, #hal.interface.binding<0, 1>]} { | |
^bb0(%arg0: !hal.device, %arg1: index): | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg1 | |
hal.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack() attributes {translation_info = #iree_codegen.translation_info<CPUDoubleTilingExpert>} { | |
      // (body identical to the function in the previous dump; verbatim duplicate elided)
} | |
} | |
} | |
// -----// IR Dump After ConfigureExecutablesPass (iree-hal-configure-executables) //----- // | |
hal.executable private @broadcast_pack_kernel_dispatch_0 { | |
hal.executable.variant public @embedded_elf_x86_64 target(<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>) { | |
    // (export and module contents identical to the hal.executable.variant in the previous dump; verbatim duplicate elided)
} | |
} | |
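// NOTE: configuration only attaches attributes; the computation is untouched.
// Under CPUDoubleTilingExpert the four tile_sizes lists are tiling levels, and
// the root op (the tensor.pack) drives distribution. A rough reading,
// consistent with the dumps that follow:
//   level 0 [64, 4, 64]   -> workgroup distribution (the scf.for nest below)
//   level 1 [1, 1, 1]     -> per-element pack tiles; the broadcast uses [1, 16, 1]
//   levels 2-3 [0, 0, 0]  -> unused here (this dispatch has no reduction dims)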
// -----// IR Dump After LowerExecutableUsingTransformDialect (iree-codegen-lower-executable-using-transform-dialect) //----- // | |
module { | |
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack() attributes {translation_info = #iree_codegen.translation_info<CPUDoubleTilingExpert>} { | |
    // (body identical to the function in the previous dumps; verbatim duplicate elided)
} | |
} | |
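// NOTE: no transform-dialect strategy appears to be attached to this variant,
// so the pass leaves the function unchanged; the dump is simply printed at the
// inner module scope rather than under the hal.executable wrappers.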
// -----// IR Dump After TileAndDistributeToWorkgroups (iree-codegen-tile-and-distribute-to-workgroups) //----- // | |
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack() attributes {translation_info = #iree_codegen.translation_info<CPUDoubleTilingExpert>} { | |
%c64 = arith.constant 64 : index | |
%c4 = arith.constant 4 : index | |
%c3200 = arith.constant 3200 : index | |
%c540 = arith.constant 540 : index | |
%c0 = arith.constant 0 : index | |
%c32_i64 = arith.constant 32 : i64 | |
%0 = hal.interface.constant.load[0] : i32 | |
%1 = hal.interface.constant.load[1] : i32 | |
%2 = arith.extui %0 : i32 to i64 | |
%3 = arith.extui %1 : i32 to i64 | |
%4 = arith.shli %3, %c32_i64 : i64 | |
%5 = arith.ori %2, %4 : i64 | |
%6 = arith.index_castui %5 : i64 to index | |
%7 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%8 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6} | |
%workgroup_id_x = hal.interface.workgroup.id[0] : index | |
%workgroup_count_x = hal.interface.workgroup.count[0] : index | |
%workgroup_id_y = hal.interface.workgroup.id[1] : index | |
%workgroup_count_y = hal.interface.workgroup.count[1] : index | |
%workgroup_id_z = hal.interface.workgroup.id[2] : index | |
%workgroup_count_z = hal.interface.workgroup.count[2] : index | |
%9 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_z] | |
%10 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_z] | |
scf.for %arg0 = %9 to %6 step %10 { | |
%11 = affine.min affine_map<(d0)[s0] -> (-d0 + s0, 64)>(%arg0)[%6] | |
%12 = affine.apply affine_map<()[s0] -> (s0 * 4)>()[%workgroup_id_y] | |
%13 = affine.apply affine_map<()[s0] -> (s0 * 4)>()[%workgroup_count_y] | |
scf.for %arg1 = %12 to %c540 step %13 { | |
%14 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_x] | |
%15 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_x] | |
scf.for %arg2 = %14 to %c3200 step %15 { | |
%16 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg1) | |
%17 = flow.dispatch.tensor.load %7, offsets = [%16, %arg2], sizes = [%c64, %c64], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<?x?xf16> | |
%18 = tensor.empty(%11) : tensor<?x64x64xf16> | |
%cast = tensor.cast %17 : tensor<?x?xf16> to tensor<64x64xf16> | |
%19 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%cast : tensor<64x64xf16>) outs(%18 : tensor<?x64x64xf16>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64, 64], [1, 16, 1], [0, 0, 0], [0, 0, 0]]>} { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x64x64xf16> | |
%20 = tensor.empty(%11) : tensor<?x4x64x16x1xf16> | |
%pack = tensor.pack %19 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %20 {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 4, 64], [1, 1, 1], [0, 0, 0], [0, 0, 0]]>} : tensor<?x64x64xf16> -> tensor<?x4x64x16x1xf16> | |
%cast_0 = tensor.cast %pack : tensor<?x4x64x16x1xf16> to tensor<?x?x?x16x1xf16> | |
%21 = arith.extui %0 : i32 to i64 | |
%22 = arith.extui %1 : i32 to i64 | |
%23 = arith.shli %22, %c32_i64 : i64 | |
%24 = arith.ori %21, %23 : i64 | |
%25 = arith.index_castui %24 : i64 to index | |
flow.dispatch.tensor.store %cast_0, %8, offsets = [%arg0, %arg1, %arg2, 0, 0], sizes = [%11, %c4, %c64, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x?x?x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%25} | |
} | |
} | |
} | |
return | |
} | |
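// NOTE: distribution materializes the level-0 tile sizes as a 3-D workgroup
// grid; each workgroup walks a strided share of the D x 540 x 3200 output in
// 64 x 4 x 64 tiles, with affine.min trimming the edge of the dynamic dim:
//   for z = id_z * 64 to D    step count_z * 64
//     for y = id_y * 4  to 540  step count_y * 4
//       for x = id_x * 64 to 3200 step count_x * 64
// The broadcast input tile is read at row y * 16 because each packed outer row
// covers 16 source rows (540 * 16 = 8640).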
// -----// IR Dump After ConvertToDestinationPassingStyle (iree-codegen-convert-to-destination-passing-style) //----- // | |
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack() attributes {translation_info = #iree_codegen.translation_info<CPUDoubleTilingExpert>} { | |
%c64 = arith.constant 64 : index | |
%c4 = arith.constant 4 : index | |
%c3200 = arith.constant 3200 : index | |
%c540 = arith.constant 540 : index | |
%c0 = arith.constant 0 : index | |
%c32_i64 = arith.constant 32 : i64 | |
%0 = hal.interface.constant.load[0] : i32 | |
%1 = hal.interface.constant.load[1] : i32 | |
%2 = arith.extui %0 : i32 to i64 | |
%3 = arith.extui %1 : i32 to i64 | |
%4 = arith.shli %3, %c32_i64 : i64 | |
%5 = arith.ori %2, %4 : i64 | |
%6 = arith.index_castui %5 : i64 to index | |
%7 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%8 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6} | |
%workgroup_id_x = hal.interface.workgroup.id[0] : index | |
%workgroup_count_x = hal.interface.workgroup.count[0] : index | |
%workgroup_id_y = hal.interface.workgroup.id[1] : index | |
%workgroup_count_y = hal.interface.workgroup.count[1] : index | |
%workgroup_id_z = hal.interface.workgroup.id[2] : index | |
%workgroup_count_z = hal.interface.workgroup.count[2] : index | |
%9 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_z] | |
%10 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_z] | |
scf.for %arg0 = %9 to %6 step %10 { | |
%11 = affine.min affine_map<(d0)[s0] -> (-d0 + s0, 64)>(%arg0)[%6] | |
%12 = affine.apply affine_map<()[s0] -> (s0 * 4)>()[%workgroup_id_y] | |
%13 = affine.apply affine_map<()[s0] -> (s0 * 4)>()[%workgroup_count_y] | |
scf.for %arg1 = %12 to %c540 step %13 { | |
%14 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_x] | |
%15 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_x] | |
scf.for %arg2 = %14 to %c3200 step %15 { | |
%16 = arith.extui %0 : i32 to i64 | |
%17 = arith.extui %1 : i32 to i64 | |
%18 = arith.shli %17, %c32_i64 : i64 | |
%19 = arith.ori %16, %18 : i64 | |
%20 = arith.index_castui %19 : i64 to index | |
%21 = arith.extui %0 : i32 to i64 | |
%22 = arith.extui %1 : i32 to i64 | |
%23 = arith.shli %22, %c32_i64 : i64 | |
%24 = arith.ori %21, %23 : i64 | |
%25 = arith.index_castui %24 : i64 to index | |
%26 = affine.min affine_map<(d0)[s0] -> (-d0 + s0, 64)>(%arg0)[%25] | |
%27 = flow.dispatch.tensor.load %8, offsets = [%arg0, %arg1, %arg2, 0, 0], sizes = [%26, %c4, %c64, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%20} -> tensor<?x?x?x16x1xf16> | |
%cast = tensor.cast %27 : tensor<?x?x?x16x1xf16> to tensor<?x4x64x16x1xf16> | |
%28 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg1) | |
%29 = flow.dispatch.tensor.load %7, offsets = [%28, %arg2], sizes = [%c64, %c64], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<?x?xf16> | |
%30 = tensor.empty(%11) : tensor<?x64x64xf16> | |
%cast_0 = tensor.cast %29 : tensor<?x?xf16> to tensor<64x64xf16> | |
%31 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%cast_0 : tensor<64x64xf16>) outs(%30 : tensor<?x64x64xf16>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64, 64], [1, 16, 1], [0, 0, 0], [0, 0, 0]]>} { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x64x64xf16> | |
%pack = tensor.pack %31 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %cast {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 4, 64], [1, 1, 1], [0, 0, 0], [0, 0, 0]]>} : tensor<?x64x64xf16> -> tensor<?x4x64x16x1xf16> | |
%cast_1 = tensor.cast %pack : tensor<?x4x64x16x1xf16> to tensor<?x?x?x16x1xf16> | |
%32 = arith.extui %0 : i32 to i64 | |
%33 = arith.extui %1 : i32 to i64 | |
%34 = arith.shli %33, %c32_i64 : i64 | |
%35 = arith.ori %32, %34 : i64 | |
%36 = arith.index_castui %35 : i64 to index | |
flow.dispatch.tensor.store %cast_1, %8, offsets = [%arg0, %arg1, %arg2, 0, 0], sizes = [%11, %c4, %c64, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x?x?x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%36} | |
} | |
} | |
} | |
return | |
} | |
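// NOTE: destination-passing style swaps the loop-local tensor.empty that fed
// the pack for a tile loaded from the writeonly output binding, so
// bufferization can later perform the pack in place. In sketch form:
//   %dest = flow.dispatch.tensor.load %out[...]   // was tensor.empty(...)
//   %pack = tensor.pack %bcast ... into %dest
//   flow.dispatch.tensor.store %pack, %out[...]
// The push-constant decode is re-materialized inside the loop nest here; the
// canonicalize and CSE runs below fold those duplicate chains back to %6.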
// -----// IR Dump After FoldAffineMinInDistributedLoops (iree-codegen-fold-affinemin-in-distributed-loops) //----- // | |
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack() attributes {translation_info = #iree_codegen.translation_info<CPUDoubleTilingExpert>} { | |
  // (IR unchanged by this pass; body identical to the ConvertToDestinationPassingStyle dump above)
} | |
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack() attributes {translation_info = #iree_codegen.translation_info<CPUDoubleTilingExpert>} { | |
%c3200 = arith.constant 3200 : index | |
%c540 = arith.constant 540 : index | |
%c0 = arith.constant 0 : index | |
%c32_i64 = arith.constant 32 : i64 | |
%0 = hal.interface.constant.load[0] : i32 | |
%1 = hal.interface.constant.load[1] : i32 | |
%2 = arith.extui %0 : i32 to i64 | |
%3 = arith.extui %1 : i32 to i64 | |
%4 = arith.shli %3, %c32_i64 : i64 | |
%5 = arith.ori %2, %4 : i64 | |
%6 = arith.index_castui %5 : i64 to index | |
%7 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%8 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6} | |
%workgroup_id_x = hal.interface.workgroup.id[0] : index | |
%workgroup_count_x = hal.interface.workgroup.count[0] : index | |
%workgroup_id_y = hal.interface.workgroup.id[1] : index | |
%workgroup_count_y = hal.interface.workgroup.count[1] : index | |
%workgroup_id_z = hal.interface.workgroup.id[2] : index | |
%workgroup_count_z = hal.interface.workgroup.count[2] : index | |
%9 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_z] | |
%10 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_z] | |
scf.for %arg0 = %9 to %6 step %10 { | |
%11 = affine.min affine_map<(d0)[s0] -> (-d0 + s0, 64)>(%arg0)[%6] | |
%12 = affine.apply affine_map<()[s0] -> (s0 * 4)>()[%workgroup_id_y] | |
%13 = affine.apply affine_map<()[s0] -> (s0 * 4)>()[%workgroup_count_y] | |
scf.for %arg1 = %12 to %c540 step %13 { | |
%14 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_x] | |
%15 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_x] | |
scf.for %arg2 = %14 to %c3200 step %15 { | |
%16 = arith.extui %0 : i32 to i64 | |
%17 = arith.extui %1 : i32 to i64 | |
%18 = arith.shli %17, %c32_i64 : i64 | |
%19 = arith.ori %16, %18 : i64 | |
%20 = arith.index_castui %19 : i64 to index | |
%21 = affine.min affine_map<(d0)[s0] -> (-d0 + s0, 64)>(%arg0)[%20] | |
%22 = flow.dispatch.tensor.load %8, offsets = [%arg0, %arg1, %arg2, 0, 0], sizes = [%21, 4, 64, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6} -> tensor<?x4x64x16x1xf16> | |
%23 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg1) | |
%24 = flow.dispatch.tensor.load %7, offsets = [%23, %arg2], sizes = [64, 64], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<64x64xf16> | |
%25 = tensor.empty(%11) : tensor<?x64x64xf16> | |
%26 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%24 : tensor<64x64xf16>) outs(%25 : tensor<?x64x64xf16>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64, 64], [1, 16, 1], [0, 0, 0], [0, 0, 0]]>} { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x64x64xf16> | |
%pack = tensor.pack %26 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %22 {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 4, 64], [1, 1, 1], [0, 0, 0], [0, 0, 0]]>} : tensor<?x64x64xf16> -> tensor<?x4x64x16x1xf16> | |
flow.dispatch.tensor.store %pack, %8, offsets = [%arg0, %arg1, %arg2, 0, 0], sizes = [%11, 4, 64, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x4x64x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6} | |
} | |
} | |
} | |
return | |
} | |
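// NOTE: canonicalization folded the tensor.cast pairs away (the input tile is
// now loaded directly as tensor<64x64xf16>), turned the %c64/%c4 size operands
// into static sizes, and collapsed the two in-loop push-constant decode chains
// into one.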
// -----// IR Dump After CSE (cse) //----- // | |
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack() attributes {translation_info = #iree_codegen.translation_info<CPUDoubleTilingExpert>} { | |
%c3200 = arith.constant 3200 : index | |
%c540 = arith.constant 540 : index | |
%c0 = arith.constant 0 : index | |
%c32_i64 = arith.constant 32 : i64 | |
%0 = hal.interface.constant.load[0] : i32 | |
%1 = hal.interface.constant.load[1] : i32 | |
%2 = arith.extui %0 : i32 to i64 | |
%3 = arith.extui %1 : i32 to i64 | |
%4 = arith.shli %3, %c32_i64 : i64 | |
%5 = arith.ori %2, %4 : i64 | |
%6 = arith.index_castui %5 : i64 to index | |
%7 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%8 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6} | |
%workgroup_id_x = hal.interface.workgroup.id[0] : index | |
%workgroup_count_x = hal.interface.workgroup.count[0] : index | |
%workgroup_id_y = hal.interface.workgroup.id[1] : index | |
%workgroup_count_y = hal.interface.workgroup.count[1] : index | |
%workgroup_id_z = hal.interface.workgroup.id[2] : index | |
%workgroup_count_z = hal.interface.workgroup.count[2] : index | |
%9 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_z] | |
%10 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_z] | |
scf.for %arg0 = %9 to %6 step %10 { | |
%11 = affine.min affine_map<(d0)[s0] -> (-d0 + s0, 64)>(%arg0)[%6] | |
%12 = affine.apply affine_map<()[s0] -> (s0 * 4)>()[%workgroup_id_y] | |
%13 = affine.apply affine_map<()[s0] -> (s0 * 4)>()[%workgroup_count_y] | |
scf.for %arg1 = %12 to %c540 step %13 { | |
%14 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_x] | |
%15 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_x] | |
scf.for %arg2 = %14 to %c3200 step %15 { | |
%16 = flow.dispatch.tensor.load %8, offsets = [%arg0, %arg1, %arg2, 0, 0], sizes = [%11, 4, 64, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6} -> tensor<?x4x64x16x1xf16> | |
%17 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg1) | |
%18 = flow.dispatch.tensor.load %7, offsets = [%17, %arg2], sizes = [64, 64], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<64x64xf16> | |
%19 = tensor.empty(%11) : tensor<?x64x64xf16> | |
%20 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%18 : tensor<64x64xf16>) outs(%19 : tensor<?x64x64xf16>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64, 64], [1, 16, 1], [0, 0, 0], [0, 0, 0]]>} { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x64x64xf16> | |
%pack = tensor.pack %20 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %16 {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 4, 64], [1, 1, 1], [0, 0, 0], [0, 0, 0]]>} : tensor<?x64x64xf16> -> tensor<?x4x64x16x1xf16> | |
flow.dispatch.tensor.store %pack, %8, offsets = [%arg0, %arg1, %arg2, 0, 0], sizes = [%11, 4, 64, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x4x64x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6} | |
} | |
} | |
} | |
return | |
} | |
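// NOTE: CSE deduplicated the remaining in-loop decode chain against the
// function-level one, so the loop body now uses %6 and %11 directly; this is
// the fixed point that the next two pad-oriented passes leave untouched.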
// -----// IR Dump After FuseTensorPadWithConsumer (iree-codegen-fuse-tensor-pad-with-consumer) //----- // | |
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack() attributes {translation_info = #iree_codegen.translation_info<CPUDoubleTilingExpert>} { | |
  // (IR unchanged by this pass, as there is no tensor.pad to fuse; body identical to the CSE dump above)
} | |
// -----// IR Dump After ConcretizePadResultShape (iree-codegen-concretize-pad-result-shape) //----- // | |
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack() attributes {translation_info = #iree_codegen.translation_info<CPUDoubleTilingExpert>} { | |
  // (IR unchanged by this pass, as there is no padded result shape to concretize; body identical to the CSE dump above)
} | |
// -----// IR Dump After LLVMCPUTileAndFuse (iree-llvmcpu-tile-and-fuse) //----- // | |
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack() attributes {translation_info = #iree_codegen.translation_info<CPUDoubleTilingExpert>} { | |
%c64 = arith.constant 64 : index | |
%c4 = arith.constant 4 : index | |
%c1 = arith.constant 1 : index | |
%c3200 = arith.constant 3200 : index | |
%c540 = arith.constant 540 : index | |
%c0 = arith.constant 0 : index | |
%c32_i64 = arith.constant 32 : i64 | |
%0 = hal.interface.constant.load[0] : i32 | |
%1 = hal.interface.constant.load[1] : i32 | |
%2 = arith.extui %0 : i32 to i64 | |
%3 = arith.extui %1 : i32 to i64 | |
%4 = arith.shli %3, %c32_i64 : i64 | |
%5 = arith.ori %2, %4 : i64 | |
%6 = arith.index_castui %5 : i64 to index | |
%7 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%8 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6} | |
%workgroup_id_x = hal.interface.workgroup.id[0] : index | |
%workgroup_count_x = hal.interface.workgroup.count[0] : index | |
%workgroup_id_y = hal.interface.workgroup.id[1] : index | |
%workgroup_count_y = hal.interface.workgroup.count[1] : index | |
%workgroup_id_z = hal.interface.workgroup.id[2] : index | |
%workgroup_count_z = hal.interface.workgroup.count[2] : index | |
%9 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_z] | |
%10 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_z] | |
scf.for %arg0 = %9 to %6 step %10 { | |
%11 = affine.min affine_map<(d0)[s0] -> (-d0 + s0, 64)>(%arg0)[%6] | |
%12 = affine.apply affine_map<()[s0] -> (s0 * 4)>()[%workgroup_id_y] | |
%13 = affine.apply affine_map<()[s0] -> (s0 * 4)>()[%workgroup_count_y] | |
scf.for %arg1 = %12 to %c540 step %13 { | |
%14 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_x] | |
%15 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_x] | |
scf.for %arg2 = %14 to %c3200 step %15 { | |
%16 = flow.dispatch.tensor.load %8, offsets = [%arg0, %arg1, %arg2, 0, 0], sizes = [%11, 4, 64, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6} -> tensor<?x4x64x16x1xf16> | |
%17 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg1) | |
%18 = flow.dispatch.tensor.load %7, offsets = [%17, %arg2], sizes = [64, 64], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<64x64xf16> | |
%19 = scf.for %arg3 = %c0 to %11 step %c1 iter_args(%arg4 = %16) -> (tensor<?x4x64x16x1xf16>) { | |
%20 = scf.for %arg5 = %c0 to %c4 step %c1 iter_args(%arg6 = %arg4) -> (tensor<?x4x64x16x1xf16>) { | |
%21 = scf.for %arg7 = %c0 to %c64 step %c1 iter_args(%arg8 = %arg6) -> (tensor<?x4x64x16x1xf16>) { | |
%22 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg5) | |
%extracted_slice = tensor.extract_slice %18[%22, %arg7] [16, 1] [1, 1] : tensor<64x64xf16> to tensor<16x1xf16> | |
%23 = tensor.empty() : tensor<1x16x1xf16> | |
%24 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%extracted_slice : tensor<16x1xf16>) outs(%23 : tensor<1x16x1xf16>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64, 64], [1, 16, 1], [0, 0, 0], [0, 0, 0]]>} { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<1x16x1xf16> | |
%extracted_slice_0 = tensor.extract_slice %arg8[%arg3, %arg5, %arg7, 0, 0] [1, 1, 1, 16, 1] [1, 1, 1, 1, 1] : tensor<?x4x64x16x1xf16> to tensor<1x1x1x16x1xf16> | |
%pack = tensor.pack %24 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %extracted_slice_0 {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 4, 64], [1, 1, 1], [0, 0, 0], [0, 0, 0]]>} : tensor<1x16x1xf16> -> tensor<1x1x1x16x1xf16> | |
%inserted_slice = tensor.insert_slice %pack into %arg8[%arg3, %arg5, %arg7, 0, 0] [1, 1, 1, 16, 1] [1, 1, 1, 1, 1] : tensor<1x1x1x16x1xf16> into tensor<?x4x64x16x1xf16> | |
scf.yield %inserted_slice : tensor<?x4x64x16x1xf16> | |
} | |
scf.yield %21 : tensor<?x4x64x16x1xf16> | |
} | |
scf.yield %20 : tensor<?x4x64x16x1xf16> | |
} | |
flow.dispatch.tensor.store %19, %8, offsets = [%arg0, %arg1, %arg2, 0, 0], sizes = [%11, 4, 64, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x4x64x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6} | |
} | |
} | |
} | |
return | |
} | |
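// NOTE: the second tiling level retiles each workgroup tile down to single
// packed elements: per innermost iteration one 16x1 input sliver is broadcast
// into a 1x16x1 tile and packed into one 1x1x1x16x1 slice of the destination,
// roughly:
//   %in = tensor.extract_slice %tile[%y * 16, %x] [16, 1] [1, 1]
//   %b  = linalg.generic ... ins(%in) outs(%empty : tensor<1x16x1xf16>)
//   %p  = tensor.pack %b ... into %dst_slice : tensor<1x16x1xf16> -> tensor<1x1x1x16x1xf16>
// so the broadcast is fused at pack-tile granularity instead of materializing
// the full ?x64x64 intermediate.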
// -----// IR Dump After FuseTensorPadWithConsumer (iree-codegen-fuse-tensor-pad-with-consumer) //----- // | |
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack() attributes {translation_info = #iree_codegen.translation_info<CPUDoubleTilingExpert>} { | |
  // (IR unchanged by this pass, as there is no tensor.pad to fuse; body identical to the LLVMCPUTileAndFuse dump above)
} | |
// -----// IR Dump After ConcretizePadResultShape (iree-codegen-concretize-pad-result-shape) //----- // | |
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack() attributes {translation_info = #iree_codegen.translation_info<CPUDoubleTilingExpert>} { | |
  // (IR unchanged by this pass, as there is no padded result shape to concretize; body identical to the LLVMCPUTileAndFuse dump above)
} | |
// -----// IR Dump After LLVMCPUSplitReduction (iree-llvmcpu-split-reduction) //----- // | |
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack() attributes {translation_info = #iree_codegen.translation_info<CPUDoubleTilingExpert>} { | |
  // (IR unchanged by this pass, as every iterator is parallel and there is no reduction to split; body identical to the LLVMCPUTileAndFuse dump above)
} | |
// -----// IR Dump After LLVMCPUTile (iree-llvmcpu-tile) //----- // | |
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack() attributes {translation_info = #iree_codegen.translation_info<CPUDoubleTilingExpert>} { | |
  // (IR unchanged by this pass; body identical to the LLVMCPUTileAndFuse dump above)
} | |
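// Note: the dynamic outer dimension D of the ?x540x3200x16x1 output is carried as
// two 32-bit push constants and reassembled above as D = lo | (hi << 32)
// (%0..%6; e.g. lo = 2, hi = 0 gives D = 2). The workgroup grid tiles the
// [D, 540, 3200] iteration space by [64, 4, 64] along (z, y, x), and within each
// workgroup tile the scf.for nest visits 1x1x1 sub-tiles, broadcasting one 16x1
// column of the 8640x3200 f16 source into the matching 16x1 inner tile of the
// packed output, per the lowering_config tile sizes shown.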
// -----// IR Dump After LLVMCPUTileAndFuse (iree-llvmcpu-tile-and-fuse) //----- //
// (function body unchanged from the dump after LLVMCPUTile above)
// -----// IR Dump After FuseTensorPadWithConsumer (iree-codegen-fuse-tensor-pad-with-consumer) //----- //
// (function body unchanged; there is no tensor.pad in this dispatch)
// -----// IR Dump After ConcretizePadResultShape (iree-codegen-concretize-pad-result-shape) //----- //
// (function body unchanged)
// -----// IR Dump After TensorToVectorVectorizePad (iree-codegen-vectorize-tensor-pad) //----- //
// (function body unchanged)
// -----// IR Dump After DecomposePackUnPackOps (iree-codegen-decompose-pack-unpack-ops) //----- //
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack() attributes {translation_info = #iree_codegen.translation_info<CPUDoubleTilingExpert>} {
%c64 = arith.constant 64 : index
%c4 = arith.constant 4 : index
%c1 = arith.constant 1 : index
%c3200 = arith.constant 3200 : index
%c540 = arith.constant 540 : index
%c0 = arith.constant 0 : index
%c32_i64 = arith.constant 32 : i64
%0 = hal.interface.constant.load[0] : i32
%1 = hal.interface.constant.load[1] : i32
%2 = arith.extui %0 : i32 to i64
%3 = arith.extui %1 : i32 to i64
%4 = arith.shli %3, %c32_i64 : i64
%5 = arith.ori %2, %4 : i64
%6 = arith.index_castui %5 : i64 to index
%7 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>>
%8 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6}
%workgroup_id_x = hal.interface.workgroup.id[0] : index
%workgroup_count_x = hal.interface.workgroup.count[0] : index
%workgroup_id_y = hal.interface.workgroup.id[1] : index
%workgroup_count_y = hal.interface.workgroup.count[1] : index
%workgroup_id_z = hal.interface.workgroup.id[2] : index
%workgroup_count_z = hal.interface.workgroup.count[2] : index
%9 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_z]
%10 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_z]
scf.for %arg0 = %9 to %6 step %10 {
%11 = affine.min affine_map<(d0)[s0] -> (-d0 + s0, 64)>(%arg0)[%6]
%12 = affine.apply affine_map<()[s0] -> (s0 * 4)>()[%workgroup_id_y]
%13 = affine.apply affine_map<()[s0] -> (s0 * 4)>()[%workgroup_count_y]
scf.for %arg1 = %12 to %c540 step %13 {
%14 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_x]
%15 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_x]
scf.for %arg2 = %14 to %c3200 step %15 {
%16 = flow.dispatch.tensor.load %8, offsets = [%arg0, %arg1, %arg2, 0, 0], sizes = [%11, 4, 64, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6} -> tensor<?x4x64x16x1xf16>
%17 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg1)
%18 = flow.dispatch.tensor.load %7, offsets = [%17, %arg2], sizes = [64, 64], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<64x64xf16>
%19 = scf.for %arg3 = %c0 to %11 step %c1 iter_args(%arg4 = %16) -> (tensor<?x4x64x16x1xf16>) {
%20 = scf.for %arg5 = %c0 to %c4 step %c1 iter_args(%arg6 = %arg4) -> (tensor<?x4x64x16x1xf16>) {
%21 = scf.for %arg7 = %c0 to %c64 step %c1 iter_args(%arg8 = %arg6) -> (tensor<?x4x64x16x1xf16>) {
%22 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg5)
%extracted_slice = tensor.extract_slice %18[%22, %arg7] [16, 1] [1, 1] : tensor<64x64xf16> to tensor<16x1xf16>
%23 = tensor.empty() : tensor<1x16x1xf16>
%24 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%extracted_slice : tensor<16x1xf16>) outs(%23 : tensor<1x16x1xf16>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64, 64], [1, 16, 1], [0, 0, 0], [0, 0, 0]]>} {
^bb0(%in: f16, %out: f16):
linalg.yield %in : f16
} -> tensor<1x16x1xf16>
%extracted_slice_0 = tensor.extract_slice %arg8[%arg3, %arg5, %arg7, 0, 0] [1, 1, 1, 16, 1] [1, 1, 1, 1, 1] : tensor<?x4x64x16x1xf16> to tensor<1x1x1x16x1xf16>
%extracted_slice_1 = tensor.extract_slice %24[0, 0, 0] [1, 16, 1] [1, 1, 1] : tensor<1x16x1xf16> to tensor<16x1xf16>
%inserted_slice = tensor.insert_slice %extracted_slice_1 into %extracted_slice_0[0, 0, 0, 0, 0] [1, 1, 1, 16, 1] [1, 1, 1, 1, 1] : tensor<16x1xf16> into tensor<1x1x1x16x1xf16>
%inserted_slice_2 = tensor.insert_slice %inserted_slice into %arg8[%arg3, %arg5, %arg7, 0, 0] [1, 1, 1, 16, 1] [1, 1, 1, 1, 1] : tensor<1x1x1x16x1xf16> into tensor<?x4x64x16x1xf16>
scf.yield %inserted_slice_2 : tensor<?x4x64x16x1xf16>
}
scf.yield %21 : tensor<?x4x64x16x1xf16>
}
scf.yield %20 : tensor<?x4x64x16x1xf16>
}
flow.dispatch.tensor.store %19, %8, offsets = [%arg0, %arg1, %arg2, 0, 0], sizes = [%11, 4, 64, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x4x64x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6}
}
}
}
return
}
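// The tensor.pack above had all outer dims tiled to 1, so it is just a
// rank-expanding copy of the 16x1 inner tile: the pass rewrote it into the
// extract_slice / insert_slice pair visible in this dump. A minimal standalone
// sketch of that rewrite (hypothetical function names, shapes taken from the IR):
func.func @pack_unit_outer(%src: tensor<1x16x1xf16>, %dst: tensor<1x1x1x16x1xf16>) -> tensor<1x1x1x16x1xf16> {
  // One 16x1 inner tile, all outer dims already 1.
  %pack = tensor.pack %src outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %dst : tensor<1x16x1xf16> -> tensor<1x1x1x16x1xf16>
  return %pack : tensor<1x1x1x16x1xf16>
}
// ...which the pass turns into:
func.func @pack_unit_outer_decomposed(%src: tensor<1x16x1xf16>, %dst: tensor<1x1x1x16x1xf16>) -> tensor<1x1x1x16x1xf16> {
  // Rank-reducing extract of the inner tile, then a rank-expanding insert.
  %tile = tensor.extract_slice %src[0, 0, 0] [1, 16, 1] [1, 1, 1] : tensor<1x16x1xf16> to tensor<16x1xf16>
  %r = tensor.insert_slice %tile into %dst[0, 0, 0, 0, 0] [1, 1, 1, 16, 1] [1, 1, 1, 1, 1] : tensor<16x1xf16> into tensor<1x1x1x16x1xf16>
  return %r : tensor<1x1x1x16x1xf16>
}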
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
// (function body unchanged from the dump after DecomposePackUnPackOps above)
// -----// IR Dump After CSE (cse) //----- //
// (function body unchanged)
// -----// IR Dump After GenericVectorization (iree-codegen-generic-vectorization) //----- //
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack() attributes {translation_info = #iree_codegen.translation_info<CPUDoubleTilingExpert>} {
%cst = arith.constant 0.000000e+00 : f16
%c64 = arith.constant 64 : index
%c4 = arith.constant 4 : index
%c1 = arith.constant 1 : index
%c3200 = arith.constant 3200 : index
%c540 = arith.constant 540 : index
%c0 = arith.constant 0 : index
%c32_i64 = arith.constant 32 : i64
%0 = hal.interface.constant.load[0] : i32
%1 = hal.interface.constant.load[1] : i32
%2 = arith.extui %0 : i32 to i64
%3 = arith.extui %1 : i32 to i64
%4 = arith.shli %3, %c32_i64 : i64
%5 = arith.ori %2, %4 : i64
%6 = arith.index_castui %5 : i64 to index
%7 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>>
%8 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6}
%workgroup_id_x = hal.interface.workgroup.id[0] : index
%workgroup_count_x = hal.interface.workgroup.count[0] : index
%workgroup_id_y = hal.interface.workgroup.id[1] : index
%workgroup_count_y = hal.interface.workgroup.count[1] : index
%workgroup_id_z = hal.interface.workgroup.id[2] : index
%workgroup_count_z = hal.interface.workgroup.count[2] : index
%9 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_z]
%10 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_z]
scf.for %arg0 = %9 to %6 step %10 {
%11 = affine.min affine_map<(d0)[s0] -> (-d0 + s0, 64)>(%arg0)[%6]
%12 = affine.apply affine_map<()[s0] -> (s0 * 4)>()[%workgroup_id_y]
%13 = affine.apply affine_map<()[s0] -> (s0 * 4)>()[%workgroup_count_y]
scf.for %arg1 = %12 to %c540 step %13 {
%14 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_x]
%15 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_x]
scf.for %arg2 = %14 to %c3200 step %15 {
%16 = flow.dispatch.tensor.load %8, offsets = [%arg0, %arg1, %arg2, 0, 0], sizes = [%11, 4, 64, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6} -> tensor<?x4x64x16x1xf16>
%17 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg1)
%18 = flow.dispatch.tensor.load %7, offsets = [%17, %arg2], sizes = [64, 64], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<64x64xf16>
%19 = scf.for %arg3 = %c0 to %11 step %c1 iter_args(%arg4 = %16) -> (tensor<?x4x64x16x1xf16>) {
%20 = scf.for %arg5 = %c0 to %c4 step %c1 iter_args(%arg6 = %arg4) -> (tensor<?x4x64x16x1xf16>) {
%21 = scf.for %arg7 = %c0 to %c64 step %c1 iter_args(%arg8 = %arg6) -> (tensor<?x4x64x16x1xf16>) {
%22 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg5)
%extracted_slice = tensor.extract_slice %18[%22, %arg7] [16, 1] [1, 1] : tensor<64x64xf16> to tensor<16x1xf16>
%23 = tensor.empty() : tensor<1x16x1xf16>
%24 = vector.transfer_read %extracted_slice[%c0, %c0], %cst {in_bounds = [true, true]} : tensor<16x1xf16>, vector<16x1xf16>
%25 = vector.broadcast %24 : vector<16x1xf16> to vector<1x16x1xf16>
%26 = vector.transfer_write %25, %23[%c0, %c0, %c0] {in_bounds = [true, true, true]} : vector<1x16x1xf16>, tensor<1x16x1xf16>
%extracted_slice_0 = tensor.extract_slice %arg8[%arg3, %arg5, %arg7, 0, 0] [1, 1, 1, 16, 1] [1, 1, 1, 1, 1] : tensor<?x4x64x16x1xf16> to tensor<1x1x1x16x1xf16>
%extracted_slice_1 = tensor.extract_slice %26[0, 0, 0] [1, 16, 1] [1, 1, 1] : tensor<1x16x1xf16> to tensor<16x1xf16>
%inserted_slice = tensor.insert_slice %extracted_slice_1 into %extracted_slice_0[0, 0, 0, 0, 0] [1, 1, 1, 16, 1] [1, 1, 1, 1, 1] : tensor<16x1xf16> into tensor<1x1x1x16x1xf16>
%inserted_slice_2 = tensor.insert_slice %inserted_slice into %arg8[%arg3, %arg5, %arg7, 0, 0] [1, 1, 1, 16, 1] [1, 1, 1, 1, 1] : tensor<1x1x1x16x1xf16> into tensor<?x4x64x16x1xf16>
scf.yield %inserted_slice_2 : tensor<?x4x64x16x1xf16>
}
scf.yield %21 : tensor<?x4x64x16x1xf16>
}
scf.yield %20 : tensor<?x4x64x16x1xf16>
}
flow.dispatch.tensor.store %19, %8, offsets = [%arg0, %arg1, %arg2, 0, 0], sizes = [%11, 4, 64, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x4x64x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6}
}
}
}
return
}
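// The broadcast linalg.generic has now been vectorized: the 16x1 source tile is
// read into a vector, broadcast to 1x16x1, and written into the empty tensor.
// A minimal standalone sketch of the resulting pattern (hypothetical function
// name, ops and shapes taken from the dump above):
func.func @broadcast_16x1(%src: tensor<16x1xf16>) -> tensor<1x16x1xf16> {
  %c0 = arith.constant 0 : index
  %cst = arith.constant 0.000000e+00 : f16  // transfer_read padding value; never read, since in_bounds
  %empty = tensor.empty() : tensor<1x16x1xf16>
  %v = vector.transfer_read %src[%c0, %c0], %cst {in_bounds = [true, true]} : tensor<16x1xf16>, vector<16x1xf16>
  %b = vector.broadcast %v : vector<16x1xf16> to vector<1x16x1xf16>
  %w = vector.transfer_write %b, %empty[%c0, %c0, %c0] {in_bounds = [true, true, true]} : vector<1x16x1xf16>, tensor<1x16x1xf16>
  return %w : tensor<1x16x1xf16>
}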
// -----// IR Dump After OptimizeTensorInsertExtractSlices (iree-codegen-optimize-tensor-insert-extract-slices) //----- //
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack() attributes {translation_info = #iree_codegen.translation_info<CPUDoubleTilingExpert>} {
%cst = arith.constant 0.000000e+00 : f16
%c64 = arith.constant 64 : index
%c4 = arith.constant 4 : index
%c1 = arith.constant 1 : index
%c3200 = arith.constant 3200 : index
%c540 = arith.constant 540 : index
%c0 = arith.constant 0 : index
%c32_i64 = arith.constant 32 : i64
%0 = hal.interface.constant.load[0] : i32
%1 = hal.interface.constant.load[1] : i32
%2 = arith.extui %0 : i32 to i64
%3 = arith.extui %1 : i32 to i64
%4 = arith.shli %3, %c32_i64 : i64
%5 = arith.ori %2, %4 : i64
%6 = arith.index_castui %5 : i64 to index
%7 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>>
%8 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6}
%workgroup_id_x = hal.interface.workgroup.id[0] : index
%workgroup_count_x = hal.interface.workgroup.count[0] : index
%workgroup_id_y = hal.interface.workgroup.id[1] : index
%workgroup_count_y = hal.interface.workgroup.count[1] : index
%workgroup_id_z = hal.interface.workgroup.id[2] : index
%workgroup_count_z = hal.interface.workgroup.count[2] : index
%9 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_z]
%10 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_z]
%11 = affine.apply affine_map<()[s0] -> (s0 * 4)>()[%workgroup_id_y]
%12 = affine.apply affine_map<()[s0] -> (s0 * 4)>()[%workgroup_count_y]
%13 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_x]
%14 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_x]
%15 = tensor.empty() : tensor<1x16x1xf16>
scf.for %arg0 = %9 to %6 step %10 {
%16 = affine.min affine_map<(d0)[s0] -> (-d0 + s0, 64)>(%arg0)[%6]
scf.for %arg1 = %11 to %c540 step %12 {
%17 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg1)
scf.for %arg2 = %13 to %c3200 step %14 {
%18 = flow.dispatch.tensor.load %8, offsets = [%arg0, %arg1, %arg2, 0, 0], sizes = [%16, 4, 64, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6} -> tensor<?x4x64x16x1xf16>
%19 = flow.dispatch.tensor.load %7, offsets = [%17, %arg2], sizes = [64, 64], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<64x64xf16>
%20 = scf.for %arg3 = %c0 to %16 step %c1 iter_args(%arg4 = %18) -> (tensor<?x4x64x16x1xf16>) {
%21 = scf.for %arg5 = %c0 to %c4 step %c1 iter_args(%arg6 = %arg4) -> (tensor<?x4x64x16x1xf16>) {
%22 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg5)
%23 = scf.for %arg7 = %c0 to %c64 step %c1 iter_args(%arg8 = %arg6) -> (tensor<?x4x64x16x1xf16>) {
%24 = vector.transfer_read %19[%22, %arg7], %cst {in_bounds = [true, true]} : tensor<64x64xf16>, vector<16x1xf16>
%25 = vector.broadcast %24 : vector<16x1xf16> to vector<1x16x1xf16>
%26 = vector.transfer_write %25, %15[%c0, %c0, %c0] {in_bounds = [true, true, true]} : vector<1x16x1xf16>, tensor<1x16x1xf16>
%extracted_slice = tensor.extract_slice %arg8[%arg3, %arg5, %arg7, 0, 0] [1, 1, 1, 16, 1] [1, 1, 1, 1, 1] : tensor<?x4x64x16x1xf16> to tensor<1x1x1x16x1xf16>
%extracted_slice_0 = tensor.extract_slice %26[0, 0, 0] [1, 16, 1] [1, 1, 1] : tensor<1x16x1xf16> to tensor<16x1xf16>
%inserted_slice = tensor.insert_slice %extracted_slice_0 into %extracted_slice[0, 0, 0, 0, 0] [1, 1, 1, 16, 1] [1, 1, 1, 1, 1] : tensor<16x1xf16> into tensor<1x1x1x16x1xf16>
%inserted_slice_1 = tensor.insert_slice %inserted_slice into %arg8[%arg3, %arg5, %arg7, 0, 0] [1, 1, 1, 16, 1] [1, 1, 1, 1, 1] : tensor<1x1x1x16x1xf16> into tensor<?x4x64x16x1xf16>
scf.yield %inserted_slice_1 : tensor<?x4x64x16x1xf16>
}
scf.yield %23 : tensor<?x4x64x16x1xf16>
}
scf.yield %21 : tensor<?x4x64x16x1xf16>
}
flow.dispatch.tensor.store %20, %8, offsets = [%arg0, %arg1, %arg2, 0, 0], sizes = [%16, 4, 64, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x4x64x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6}
}
}
}
return
}
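// OptimizeTensorInsertExtractSlices hoisted the loop-invariant values (the
// workgroup offset affine.applys and the 1x16x1 tensor.empty, now %15) out of
// the loop nest, and folded the 16x1 extract_slice into the transfer_read,
// which now indexes the 64x64 tile directly:
//   before: %extracted_slice = tensor.extract_slice %18[%22, %arg7] [16, 1] [1, 1]
//           %24 = vector.transfer_read %extracted_slice[%c0, %c0], %cst ...
//   after:  %24 = vector.transfer_read %19[%22, %arg7], %cst ...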
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
// (function body unchanged from the dump after OptimizeTensorInsertExtractSlices above)
// -----// IR Dump After CSE (cse) //----- //
// (function body unchanged)
// -----// IR Dump After EliminateEmptyTensors (iree-eliminate-empty-tensors) //----- //
// (function body unchanged; the tensor.empty feeding the vector.transfer_write is still present)
// -----// IR Dump After EmptyTensorToAllocTensor (empty-tensor-to-alloc-tensor) //----- // | |
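// Only change from the previous dump: %15 = tensor.empty() : tensor<1x16x1xf16>
// is rewritten to %15 = bufferization.alloc_tensor() : tensor<1x16x1xf16>, so
// comprehensive bufferization can assign the scratch tile a real buffer.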
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack() attributes {translation_info = #iree_codegen.translation_info<CPUDoubleTilingExpert>} { | |
%cst = arith.constant 0.000000e+00 : f16 | |
%c64 = arith.constant 64 : index | |
%c4 = arith.constant 4 : index | |
%c1 = arith.constant 1 : index | |
%c3200 = arith.constant 3200 : index | |
%c540 = arith.constant 540 : index | |
%c0 = arith.constant 0 : index | |
%c32_i64 = arith.constant 32 : i64 | |
%0 = hal.interface.constant.load[0] : i32 | |
%1 = hal.interface.constant.load[1] : i32 | |
%2 = arith.extui %0 : i32 to i64 | |
%3 = arith.extui %1 : i32 to i64 | |
%4 = arith.shli %3, %c32_i64 : i64 | |
%5 = arith.ori %2, %4 : i64 | |
%6 = arith.index_castui %5 : i64 to index | |
%7 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%8 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6} | |
%workgroup_id_x = hal.interface.workgroup.id[0] : index | |
%workgroup_count_x = hal.interface.workgroup.count[0] : index | |
%workgroup_id_y = hal.interface.workgroup.id[1] : index | |
%workgroup_count_y = hal.interface.workgroup.count[1] : index | |
%workgroup_id_z = hal.interface.workgroup.id[2] : index | |
%workgroup_count_z = hal.interface.workgroup.count[2] : index | |
%9 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_z] | |
%10 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_z] | |
%11 = affine.apply affine_map<()[s0] -> (s0 * 4)>()[%workgroup_id_y] | |
%12 = affine.apply affine_map<()[s0] -> (s0 * 4)>()[%workgroup_count_y] | |
%13 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_x] | |
%14 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_x] | |
%15 = bufferization.alloc_tensor() : tensor<1x16x1xf16> | |
scf.for %arg0 = %9 to %6 step %10 { | |
%16 = affine.min affine_map<(d0)[s0] -> (-d0 + s0, 64)>(%arg0)[%6] | |
scf.for %arg1 = %11 to %c540 step %12 { | |
%17 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg1) | |
scf.for %arg2 = %13 to %c3200 step %14 { | |
%18 = flow.dispatch.tensor.load %8, offsets = [%arg0, %arg1, %arg2, 0, 0], sizes = [%16, 4, 64, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6} -> tensor<?x4x64x16x1xf16> | |
%19 = flow.dispatch.tensor.load %7, offsets = [%17, %arg2], sizes = [64, 64], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<64x64xf16> | |
%20 = scf.for %arg3 = %c0 to %16 step %c1 iter_args(%arg4 = %18) -> (tensor<?x4x64x16x1xf16>) { | |
%21 = scf.for %arg5 = %c0 to %c4 step %c1 iter_args(%arg6 = %arg4) -> (tensor<?x4x64x16x1xf16>) { | |
%22 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg5) | |
%23 = scf.for %arg7 = %c0 to %c64 step %c1 iter_args(%arg8 = %arg6) -> (tensor<?x4x64x16x1xf16>) { | |
%24 = vector.transfer_read %19[%22, %arg7], %cst {in_bounds = [true, true]} : tensor<64x64xf16>, vector<16x1xf16> | |
%25 = vector.broadcast %24 : vector<16x1xf16> to vector<1x16x1xf16> | |
%26 = vector.transfer_write %25, %15[%c0, %c0, %c0] {in_bounds = [true, true, true]} : vector<1x16x1xf16>, tensor<1x16x1xf16> | |
%extracted_slice = tensor.extract_slice %arg8[%arg3, %arg5, %arg7, 0, 0] [1, 1, 1, 16, 1] [1, 1, 1, 1, 1] : tensor<?x4x64x16x1xf16> to tensor<1x1x1x16x1xf16> | |
%extracted_slice_0 = tensor.extract_slice %26[0, 0, 0] [1, 16, 1] [1, 1, 1] : tensor<1x16x1xf16> to tensor<16x1xf16> | |
%inserted_slice = tensor.insert_slice %extracted_slice_0 into %extracted_slice[0, 0, 0, 0, 0] [1, 1, 1, 16, 1] [1, 1, 1, 1, 1] : tensor<16x1xf16> into tensor<1x1x1x16x1xf16> | |
%inserted_slice_1 = tensor.insert_slice %inserted_slice into %arg8[%arg3, %arg5, %arg7, 0, 0] [1, 1, 1, 16, 1] [1, 1, 1, 1, 1] : tensor<1x1x1x16x1xf16> into tensor<?x4x64x16x1xf16> | |
scf.yield %inserted_slice_1 : tensor<?x4x64x16x1xf16> | |
} | |
scf.yield %23 : tensor<?x4x64x16x1xf16> | |
} | |
scf.yield %21 : tensor<?x4x64x16x1xf16> | |
} | |
flow.dispatch.tensor.store %20, %8, offsets = [%arg0, %arg1, %arg2, 0, 0], sizes = [%16, 4, 64, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x4x64x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%6} | |
} | |
} | |
} | |
return | |
} | |
// -----// IR Dump After IREEComprehensiveBufferize (iree-codegen-iree-comprehensive-bufferize) //----- // | |
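// Bufferization switches the function from tensors to memrefs: the
// hal.interface.binding.subspan results become memrefs guarded by
// memref.assume_alignment, the alloc_tensor becomes a 64-byte-aligned
// memref.alloca, and the flow.dispatch.tensor.load/store pairs turn into
// memref.subview ops plus explicit linalg.generic copies.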
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack() attributes {translation_info = #iree_codegen.translation_info<CPUDoubleTilingExpert>} { | |
%c32_i64 = arith.constant 32 : i64 | |
%c0 = arith.constant 0 : index | |
%c540 = arith.constant 540 : index | |
%c3200 = arith.constant 3200 : index | |
%c1 = arith.constant 1 : index | |
%c4 = arith.constant 4 : index | |
%c64 = arith.constant 64 : index | |
%cst = arith.constant 0.000000e+00 : f16 | |
%alloca = memref.alloca() {alignment = 64 : i64} : memref<1x16x1xf16> | |
%0 = hal.interface.constant.load[0] : i32 | |
%1 = hal.interface.constant.load[1] : i32 | |
%2 = arith.extui %0 : i32 to i64 | |
%3 = arith.extui %1 : i32 to i64 | |
%4 = arith.shli %3, %c32_i64 : i64 | |
%5 = arith.ori %2, %4 : i64 | |
%6 = arith.index_castui %5 : i64 to index | |
%7 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<8640x3200xf16, #hal.descriptor_type<storage_buffer>> | |
memref.assume_alignment %7, 64 : memref<8640x3200xf16, #hal.descriptor_type<storage_buffer>> | |
%8 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>>{%6} | |
memref.assume_alignment %8, 64 : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>> | |
%workgroup_id_x = hal.interface.workgroup.id[0] : index | |
%workgroup_count_x = hal.interface.workgroup.count[0] : index | |
%workgroup_id_y = hal.interface.workgroup.id[1] : index | |
%workgroup_count_y = hal.interface.workgroup.count[1] : index | |
%workgroup_id_z = hal.interface.workgroup.id[2] : index | |
%workgroup_count_z = hal.interface.workgroup.count[2] : index | |
%9 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_z] | |
%10 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_z] | |
%11 = affine.apply affine_map<()[s0] -> (s0 * 4)>()[%workgroup_id_y] | |
%12 = affine.apply affine_map<()[s0] -> (s0 * 4)>()[%workgroup_count_y] | |
%13 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_x] | |
%14 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_x] | |
scf.for %arg0 = %9 to %6 step %10 { | |
%15 = affine.min affine_map<(d0)[s0] -> (-d0 + s0, 64)>(%arg0)[%6] | |
scf.for %arg1 = %11 to %c540 step %12 { | |
%16 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg1) | |
scf.for %arg2 = %13 to %c3200 step %14 { | |
%subview = memref.subview %8[%arg0, %arg1, %arg2, 0, 0] [%15, 4, 64, 16, 1] [1, 1, 1, 1, 1] : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>> to memref<?x4x64x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%subview_0 = memref.subview %7[%16, %arg2] [64, 64] [1, 1] : memref<8640x3200xf16, #hal.descriptor_type<storage_buffer>> to memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%17 = scf.for %arg3 = %c0 to %15 step %c1 iter_args(%arg4 = %subview) -> (memref<?x4x64x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>) { | |
%18 = scf.for %arg5 = %c0 to %c4 step %c1 iter_args(%arg6 = %arg4) -> (memref<?x4x64x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>) { | |
%19 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg5) | |
%20 = scf.for %arg7 = %c0 to %c64 step %c1 iter_args(%arg8 = %arg6) -> (memref<?x4x64x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>) { | |
%21 = vector.transfer_read %subview_0[%19, %arg7], %cst {in_bounds = [true, true]} : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16x1xf16> | |
%22 = vector.broadcast %21 : vector<16x1xf16> to vector<1x16x1xf16> | |
vector.transfer_write %22, %alloca[%c0, %c0, %c0] {in_bounds = [true, true, true]} : vector<1x16x1xf16>, memref<1x16x1xf16> | |
%subview_2 = memref.subview %arg8[%arg3, %arg5, %arg7, 0, 0] [1, 1, 1, 16, 1] [1, 1, 1, 1, 1] : memref<?x4x64x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<1x1x1x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%subview_3 = memref.subview %alloca[0, 0, 0] [1, 16, 1] [1, 1, 1] : memref<1x16x1xf16> to memref<16x1xf16, strided<[1, 1]>> | |
%subview_4 = memref.subview %subview_2[0, 0, 0, 0, 0] [1, 1, 1, 16, 1] [1, 1, 1, 1, 1] : memref<1x1x1x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<16x1xf16, strided<[1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%subview_3 : memref<16x1xf16, strided<[1, 1]>>) outs(%subview_4 : memref<16x1xf16, strided<[1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} | |
%subview_5 = memref.subview %arg8[%arg3, %arg5, %arg7, 0, 0] [1, 1, 1, 16, 1] [1, 1, 1, 1, 1] : memref<?x4x64x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<1x1x1x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%subview_2 : memref<1x1x1x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>) outs(%subview_5 : memref<1x1x1x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} | |
scf.yield %arg8 : memref<?x4x64x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
} | |
scf.yield %20 : memref<?x4x64x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
} | |
scf.yield %18 : memref<?x4x64x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
} | |
%subview_1 = memref.subview %8[%arg0, %arg1, %arg2, 0, 0] [%15, 4, 64, 16, 1] [1, 1, 1, 1, 1] : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>> to memref<?x4x64x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%17 : memref<?x4x64x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>) outs(%subview_1 : memref<?x4x64x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} | |
} | |
} | |
} | |
return | |
} | |
// -----// IR Dump After ResolveShapedTypeResultDims (resolve-shaped-type-result-dims) //----- //
// (function body elided: byte-identical to the IREEComprehensiveBufferize dump above; the pass made no changes to this dispatch)
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
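// Canonicalization drops the memref-typed iter_args that bufferization left
// behind: the three inner scf.for loops no longer thread the ?x4x64x16x1
// buffer through scf.yield, and the trailing copy now reads %subview directly
// instead of the loop result %17.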
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack() attributes {translation_info = #iree_codegen.translation_info<CPUDoubleTilingExpert>} { | |
%c32_i64 = arith.constant 32 : i64 | |
%c0 = arith.constant 0 : index | |
%c540 = arith.constant 540 : index | |
%c3200 = arith.constant 3200 : index | |
%c1 = arith.constant 1 : index | |
%c4 = arith.constant 4 : index | |
%c64 = arith.constant 64 : index | |
%cst = arith.constant 0.000000e+00 : f16 | |
%alloca = memref.alloca() {alignment = 64 : i64} : memref<1x16x1xf16> | |
%0 = hal.interface.constant.load[0] : i32 | |
%1 = hal.interface.constant.load[1] : i32 | |
%2 = arith.extui %0 : i32 to i64 | |
%3 = arith.extui %1 : i32 to i64 | |
%4 = arith.shli %3, %c32_i64 : i64 | |
%5 = arith.ori %2, %4 : i64 | |
%6 = arith.index_castui %5 : i64 to index | |
%7 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<8640x3200xf16, #hal.descriptor_type<storage_buffer>> | |
memref.assume_alignment %7, 64 : memref<8640x3200xf16, #hal.descriptor_type<storage_buffer>> | |
%8 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>>{%6} | |
memref.assume_alignment %8, 64 : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>> | |
%workgroup_id_x = hal.interface.workgroup.id[0] : index | |
%workgroup_count_x = hal.interface.workgroup.count[0] : index | |
%workgroup_id_y = hal.interface.workgroup.id[1] : index | |
%workgroup_count_y = hal.interface.workgroup.count[1] : index | |
%workgroup_id_z = hal.interface.workgroup.id[2] : index | |
%workgroup_count_z = hal.interface.workgroup.count[2] : index | |
%9 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_z] | |
%10 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_z] | |
%11 = affine.apply affine_map<()[s0] -> (s0 * 4)>()[%workgroup_id_y] | |
%12 = affine.apply affine_map<()[s0] -> (s0 * 4)>()[%workgroup_count_y] | |
%13 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_x] | |
%14 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_x] | |
scf.for %arg0 = %9 to %6 step %10 { | |
%15 = affine.min affine_map<(d0)[s0] -> (-d0 + s0, 64)>(%arg0)[%6] | |
scf.for %arg1 = %11 to %c540 step %12 { | |
%16 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg1) | |
scf.for %arg2 = %13 to %c3200 step %14 { | |
%subview = memref.subview %8[%arg0, %arg1, %arg2, 0, 0] [%15, 4, 64, 16, 1] [1, 1, 1, 1, 1] : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>> to memref<?x4x64x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%subview_0 = memref.subview %7[%16, %arg2] [64, 64] [1, 1] : memref<8640x3200xf16, #hal.descriptor_type<storage_buffer>> to memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
scf.for %arg3 = %c0 to %15 step %c1 { | |
scf.for %arg4 = %c0 to %c4 step %c1 { | |
%17 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg4) | |
scf.for %arg5 = %c0 to %c64 step %c1 { | |
%18 = vector.transfer_read %subview_0[%17, %arg5], %cst {in_bounds = [true, true]} : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16x1xf16> | |
%19 = vector.broadcast %18 : vector<16x1xf16> to vector<1x16x1xf16> | |
vector.transfer_write %19, %alloca[%c0, %c0, %c0] {in_bounds = [true, true, true]} : vector<1x16x1xf16>, memref<1x16x1xf16> | |
%subview_2 = memref.subview %subview[%arg3, %arg4, %arg5, 0, 0] [1, 1, 1, 16, 1] [1, 1, 1, 1, 1] : memref<?x4x64x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<1x1x1x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%subview_3 = memref.subview %alloca[0, 0, 0] [1, 16, 1] [1, 1, 1] : memref<1x16x1xf16> to memref<16x1xf16, strided<[1, 1]>> | |
%subview_4 = memref.subview %subview_2[0, 0, 0, 0, 0] [1, 1, 1, 16, 1] [1, 1, 1, 1, 1] : memref<1x1x1x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<16x1xf16, strided<[1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%subview_3 : memref<16x1xf16, strided<[1, 1]>>) outs(%subview_4 : memref<16x1xf16, strided<[1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} | |
%subview_5 = memref.subview %subview[%arg3, %arg4, %arg5, 0, 0] [1, 1, 1, 16, 1] [1, 1, 1, 1, 1] : memref<?x4x64x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<1x1x1x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%subview_2 : memref<1x1x1x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>) outs(%subview_5 : memref<1x1x1x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} | |
} | |
} | |
} | |
%subview_1 = memref.subview %8[%arg0, %arg1, %arg2, 0, 0] [%15, 4, 64, 16, 1] [1, 1, 1, 1, 1] : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>> to memref<?x4x64x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%subview : memref<?x4x64x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>) outs(%subview_1 : memref<?x4x64x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} | |
} | |
} | |
} | |
return | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
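// CSE merges the duplicated memref.subview ops, so the two copy-back
// linalg.generic ops below now read and write the very same subview
// (ins == outs); the next Canonicalizer run erases these self-copies.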
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack() attributes {translation_info = #iree_codegen.translation_info<CPUDoubleTilingExpert>} { | |
%c32_i64 = arith.constant 32 : i64 | |
%c0 = arith.constant 0 : index | |
%c540 = arith.constant 540 : index | |
%c3200 = arith.constant 3200 : index | |
%c1 = arith.constant 1 : index | |
%c4 = arith.constant 4 : index | |
%c64 = arith.constant 64 : index | |
%cst = arith.constant 0.000000e+00 : f16 | |
%alloca = memref.alloca() {alignment = 64 : i64} : memref<1x16x1xf16> | |
%0 = hal.interface.constant.load[0] : i32 | |
%1 = hal.interface.constant.load[1] : i32 | |
%2 = arith.extui %0 : i32 to i64 | |
%3 = arith.extui %1 : i32 to i64 | |
%4 = arith.shli %3, %c32_i64 : i64 | |
%5 = arith.ori %2, %4 : i64 | |
%6 = arith.index_castui %5 : i64 to index | |
%7 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<8640x3200xf16, #hal.descriptor_type<storage_buffer>> | |
memref.assume_alignment %7, 64 : memref<8640x3200xf16, #hal.descriptor_type<storage_buffer>> | |
%8 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>>{%6} | |
memref.assume_alignment %8, 64 : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>> | |
%workgroup_id_x = hal.interface.workgroup.id[0] : index | |
%workgroup_count_x = hal.interface.workgroup.count[0] : index | |
%workgroup_id_y = hal.interface.workgroup.id[1] : index | |
%workgroup_count_y = hal.interface.workgroup.count[1] : index | |
%workgroup_id_z = hal.interface.workgroup.id[2] : index | |
%workgroup_count_z = hal.interface.workgroup.count[2] : index | |
%9 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_z] | |
%10 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_z] | |
%11 = affine.apply affine_map<()[s0] -> (s0 * 4)>()[%workgroup_id_y] | |
%12 = affine.apply affine_map<()[s0] -> (s0 * 4)>()[%workgroup_count_y] | |
%13 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_x] | |
%14 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_x] | |
scf.for %arg0 = %9 to %6 step %10 { | |
%15 = affine.min affine_map<(d0)[s0] -> (-d0 + s0, 64)>(%arg0)[%6] | |
scf.for %arg1 = %11 to %c540 step %12 { | |
%16 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg1) | |
scf.for %arg2 = %13 to %c3200 step %14 { | |
%subview = memref.subview %8[%arg0, %arg1, %arg2, 0, 0] [%15, 4, 64, 16, 1] [1, 1, 1, 1, 1] : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>> to memref<?x4x64x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%subview_0 = memref.subview %7[%16, %arg2] [64, 64] [1, 1] : memref<8640x3200xf16, #hal.descriptor_type<storage_buffer>> to memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
scf.for %arg3 = %c0 to %15 step %c1 { | |
scf.for %arg4 = %c0 to %c4 step %c1 { | |
%17 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg4) | |
scf.for %arg5 = %c0 to %c64 step %c1 { | |
%18 = vector.transfer_read %subview_0[%17, %arg5], %cst {in_bounds = [true, true]} : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16x1xf16> | |
%19 = vector.broadcast %18 : vector<16x1xf16> to vector<1x16x1xf16> | |
vector.transfer_write %19, %alloca[%c0, %c0, %c0] {in_bounds = [true, true, true]} : vector<1x16x1xf16>, memref<1x16x1xf16> | |
%subview_1 = memref.subview %subview[%arg3, %arg4, %arg5, 0, 0] [1, 1, 1, 16, 1] [1, 1, 1, 1, 1] : memref<?x4x64x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<1x1x1x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%subview_2 = memref.subview %alloca[0, 0, 0] [1, 16, 1] [1, 1, 1] : memref<1x16x1xf16> to memref<16x1xf16, strided<[1, 1]>> | |
%subview_3 = memref.subview %subview_1[0, 0, 0, 0, 0] [1, 1, 1, 16, 1] [1, 1, 1, 1, 1] : memref<1x1x1x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<16x1xf16, strided<[1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%subview_2 : memref<16x1xf16, strided<[1, 1]>>) outs(%subview_3 : memref<16x1xf16, strided<[1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} | |
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%subview_1 : memref<1x1x1x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>) outs(%subview_1 : memref<1x1x1x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} | |
} | |
} | |
} | |
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%subview : memref<?x4x64x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>) outs(%subview : memref<?x4x64x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} | |
} | |
} | |
} | |
return | |
} | |
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
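// The self-copies left by CSE are erased as no-ops here, leaving only the
// broadcast write into %alloca and the 16x1 copy into the packed output.
// A minimal standalone sketch of the folded pattern (illustrative only,
// not part of this dump):
//
//   func.func @self_copy(%buf: memref<16x1xf16>) {
//     linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>,
//                                      affine_map<(d0, d1) -> (d0, d1)>],
//                     iterator_types = ["parallel", "parallel"]}
//       ins(%buf : memref<16x1xf16>) outs(%buf : memref<16x1xf16>) {
//     ^bb0(%in: f16, %out: f16):
//       linalg.yield %in : f16
//     }
//     return
//   }
//
// Copying a buffer onto itself has no observable effect, which is why the
// canonicalizer can delete the op outright.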
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack() attributes {translation_info = #iree_codegen.translation_info<CPUDoubleTilingExpert>} { | |
%c32_i64 = arith.constant 32 : i64 | |
%c0 = arith.constant 0 : index | |
%c540 = arith.constant 540 : index | |
%c3200 = arith.constant 3200 : index | |
%c1 = arith.constant 1 : index | |
%c4 = arith.constant 4 : index | |
%c64 = arith.constant 64 : index | |
%cst = arith.constant 0.000000e+00 : f16 | |
%alloca = memref.alloca() {alignment = 64 : i64} : memref<1x16x1xf16> | |
%0 = hal.interface.constant.load[0] : i32 | |
%1 = hal.interface.constant.load[1] : i32 | |
%2 = arith.extui %0 : i32 to i64 | |
%3 = arith.extui %1 : i32 to i64 | |
%4 = arith.shli %3, %c32_i64 : i64 | |
%5 = arith.ori %2, %4 : i64 | |
%6 = arith.index_castui %5 : i64 to index | |
%7 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<8640x3200xf16, #hal.descriptor_type<storage_buffer>> | |
memref.assume_alignment %7, 64 : memref<8640x3200xf16, #hal.descriptor_type<storage_buffer>> | |
%8 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>>{%6} | |
memref.assume_alignment %8, 64 : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>> | |
%workgroup_id_x = hal.interface.workgroup.id[0] : index | |
%workgroup_count_x = hal.interface.workgroup.count[0] : index | |
%workgroup_id_y = hal.interface.workgroup.id[1] : index | |
%workgroup_count_y = hal.interface.workgroup.count[1] : index | |
%workgroup_id_z = hal.interface.workgroup.id[2] : index | |
%workgroup_count_z = hal.interface.workgroup.count[2] : index | |
%9 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_z] | |
%10 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_z] | |
%11 = affine.apply affine_map<()[s0] -> (s0 * 4)>()[%workgroup_id_y] | |
%12 = affine.apply affine_map<()[s0] -> (s0 * 4)>()[%workgroup_count_y] | |
%13 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_x] | |
%14 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_x] | |
scf.for %arg0 = %9 to %6 step %10 { | |
%15 = affine.min affine_map<(d0)[s0] -> (-d0 + s0, 64)>(%arg0)[%6] | |
scf.for %arg1 = %11 to %c540 step %12 { | |
%16 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg1) | |
scf.for %arg2 = %13 to %c3200 step %14 { | |
%subview = memref.subview %8[%arg0, %arg1, %arg2, 0, 0] [%15, 4, 64, 16, 1] [1, 1, 1, 1, 1] : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>> to memref<?x4x64x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%subview_0 = memref.subview %7[%16, %arg2] [64, 64] [1, 1] : memref<8640x3200xf16, #hal.descriptor_type<storage_buffer>> to memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
scf.for %arg3 = %c0 to %15 step %c1 { | |
scf.for %arg4 = %c0 to %c4 step %c1 { | |
%17 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg4) | |
scf.for %arg5 = %c0 to %c64 step %c1 { | |
%18 = vector.transfer_read %subview_0[%17, %arg5], %cst {in_bounds = [true, true]} : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16x1xf16> | |
%19 = vector.broadcast %18 : vector<16x1xf16> to vector<1x16x1xf16> | |
vector.transfer_write %19, %alloca[%c0, %c0, %c0] {in_bounds = [true, true, true]} : vector<1x16x1xf16>, memref<1x16x1xf16> | |
%subview_1 = memref.subview %subview[%arg3, %arg4, %arg5, 0, 0] [1, 1, 1, 16, 1] [1, 1, 1, 1, 1] : memref<?x4x64x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<1x1x1x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%subview_2 = memref.subview %alloca[0, 0, 0] [1, 16, 1] [1, 1, 1] : memref<1x16x1xf16> to memref<16x1xf16, strided<[1, 1]>> | |
%subview_3 = memref.subview %subview_1[0, 0, 0, 0, 0] [1, 1, 1, 16, 1] [1, 1, 1, 1, 1] : memref<1x1x1x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<16x1xf16, strided<[1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%subview_2 : memref<16x1xf16, strided<[1, 1]>>) outs(%subview_3 : memref<16x1xf16, strided<[1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} | |
} | |
} | |
} | |
} | |
} | |
} | |
return | |
} | |
// -----// IR Dump After CleanupBufferAllocView (iree-codegen-cleanup-buffer-alloc-view) //----- //
// (function body elided: byte-identical to the Canonicalizer dump above; the pass made no changes to this dispatch)
// -----// IR Dump After RemoveSingleIterationLoop (iree-codegen-remove-single-iteration-loop) //----- //
// (function body elided: still identical to the Canonicalizer dump above; none of the remaining loops are provably single-iteration, so nothing was removed)
// -----// IR Dump After LLVMCPUDropVectorUnitDims (iree-llvmcpu-drop-vector-unit-dims) //----- // | |
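// Unit dimensions are peeled out of the vector path: the vector<16x1xf16>
// read is now vector.shape_cast to vector<16xf16>, and the former
// vector<1x16x1xf16> broadcast plus 3-D transfer_write into %alloca becomes
// a 1-D transfer_write through rank-reducing memref.subview ops.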
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack() attributes {translation_info = #iree_codegen.translation_info<CPUDoubleTilingExpert>} { | |
%c32_i64 = arith.constant 32 : i64 | |
%c0 = arith.constant 0 : index | |
%c540 = arith.constant 540 : index | |
%c3200 = arith.constant 3200 : index | |
%c1 = arith.constant 1 : index | |
%c4 = arith.constant 4 : index | |
%c64 = arith.constant 64 : index | |
%cst = arith.constant 0.000000e+00 : f16 | |
%alloca = memref.alloca() {alignment = 64 : i64} : memref<1x16x1xf16> | |
%0 = hal.interface.constant.load[0] : i32 | |
%1 = hal.interface.constant.load[1] : i32 | |
%2 = arith.extui %0 : i32 to i64 | |
%3 = arith.extui %1 : i32 to i64 | |
%4 = arith.shli %3, %c32_i64 : i64 | |
%5 = arith.ori %2, %4 : i64 | |
%6 = arith.index_castui %5 : i64 to index | |
%7 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<8640x3200xf16, #hal.descriptor_type<storage_buffer>> | |
memref.assume_alignment %7, 64 : memref<8640x3200xf16, #hal.descriptor_type<storage_buffer>> | |
%8 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>>{%6} | |
memref.assume_alignment %8, 64 : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>> | |
%workgroup_id_x = hal.interface.workgroup.id[0] : index | |
%workgroup_count_x = hal.interface.workgroup.count[0] : index | |
%workgroup_id_y = hal.interface.workgroup.id[1] : index | |
%workgroup_count_y = hal.interface.workgroup.count[1] : index | |
%workgroup_id_z = hal.interface.workgroup.id[2] : index | |
%workgroup_count_z = hal.interface.workgroup.count[2] : index | |
%9 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_z] | |
%10 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_z] | |
%11 = affine.apply affine_map<()[s0] -> (s0 * 4)>()[%workgroup_id_y] | |
%12 = affine.apply affine_map<()[s0] -> (s0 * 4)>()[%workgroup_count_y] | |
%13 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_x] | |
%14 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_x] | |
scf.for %arg0 = %9 to %6 step %10 { | |
%15 = affine.min affine_map<(d0)[s0] -> (-d0 + s0, 64)>(%arg0)[%6] | |
scf.for %arg1 = %11 to %c540 step %12 { | |
%16 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg1) | |
scf.for %arg2 = %13 to %c3200 step %14 { | |
%subview = memref.subview %8[%arg0, %arg1, %arg2, 0, 0] [%15, 4, 64, 16, 1] [1, 1, 1, 1, 1] : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>> to memref<?x4x64x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%subview_0 = memref.subview %7[%16, %arg2] [64, 64] [1, 1] : memref<8640x3200xf16, #hal.descriptor_type<storage_buffer>> to memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
scf.for %arg3 = %c0 to %15 step %c1 { | |
scf.for %arg4 = %c0 to %c4 step %c1 { | |
%17 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg4) | |
scf.for %arg5 = %c0 to %c64 step %c1 { | |
%18 = vector.transfer_read %subview_0[%17, %arg5], %cst {in_bounds = [true, true]} : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16x1xf16> | |
%subview_1 = memref.subview %alloca[0, 0, 0] [1, 16, 1] [1, 1, 1] : memref<1x16x1xf16> to memref<1x16xf16, strided<[16, 1]>> | |
%19 = vector.shape_cast %18 : vector<16x1xf16> to vector<16xf16> | |
%subview_2 = memref.subview %subview_1[0, 0] [1, 16] [1, 1] : memref<1x16xf16, strided<[16, 1]>> to memref<16xf16, strided<[1]>> | |
vector.transfer_write %19, %subview_2[%c0] {in_bounds = [true]} : vector<16xf16>, memref<16xf16, strided<[1]>> | |
%subview_3 = memref.subview %subview[%arg3, %arg4, %arg5, 0, 0] [1, 1, 1, 16, 1] [1, 1, 1, 1, 1] : memref<?x4x64x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<1x1x1x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%subview_4 = memref.subview %alloca[0, 0, 0] [1, 16, 1] [1, 1, 1] : memref<1x16x1xf16> to memref<16x1xf16, strided<[1, 1]>> | |
%subview_5 = memref.subview %subview_3[0, 0, 0, 0, 0] [1, 1, 1, 16, 1] [1, 1, 1, 1, 1] : memref<1x1x1x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<16x1xf16, strided<[1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%subview_4 : memref<16x1xf16, strided<[1, 1]>>) outs(%subview_5 : memref<16x1xf16, strided<[1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} | |
} | |
} | |
} | |
} | |
} | |
} | |
return | |
} | |
// -----// IR Dump After LLVMCPUVirtualVectorLowering (iree-llvmcpu-virtual-vector-lowering) //----- //
// (function body elided: byte-identical to the LLVMCPUDropVectorUnitDims dump above; the pass made no changes to this dispatch)
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
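// note: canonicalization appears to be a no-op for this dispatch; the tiled loop nest and the scalar pack copy below match the tail of the preceding dump line for line.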
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack() attributes {translation_info = #iree_codegen.translation_info<CPUDoubleTilingExpert>} { | |
%c32_i64 = arith.constant 32 : i64 | |
%c0 = arith.constant 0 : index | |
%c540 = arith.constant 540 : index | |
%c3200 = arith.constant 3200 : index | |
%c1 = arith.constant 1 : index | |
%c4 = arith.constant 4 : index | |
%c64 = arith.constant 64 : index | |
%cst = arith.constant 0.000000e+00 : f16 | |
%alloca = memref.alloca() {alignment = 64 : i64} : memref<1x16x1xf16> | |
%0 = hal.interface.constant.load[0] : i32 | |
%1 = hal.interface.constant.load[1] : i32 | |
%2 = arith.extui %0 : i32 to i64 | |
%3 = arith.extui %1 : i32 to i64 | |
%4 = arith.shli %3, %c32_i64 : i64 | |
%5 = arith.ori %2, %4 : i64 | |
%6 = arith.index_castui %5 : i64 to index | |
%7 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<8640x3200xf16, #hal.descriptor_type<storage_buffer>> | |
memref.assume_alignment %7, 64 : memref<8640x3200xf16, #hal.descriptor_type<storage_buffer>> | |
%8 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>>{%6} | |
memref.assume_alignment %8, 64 : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>> | |
%workgroup_id_x = hal.interface.workgroup.id[0] : index | |
%workgroup_count_x = hal.interface.workgroup.count[0] : index | |
%workgroup_id_y = hal.interface.workgroup.id[1] : index | |
%workgroup_count_y = hal.interface.workgroup.count[1] : index | |
%workgroup_id_z = hal.interface.workgroup.id[2] : index | |
%workgroup_count_z = hal.interface.workgroup.count[2] : index | |
%9 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_z] | |
%10 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_z] | |
%11 = affine.apply affine_map<()[s0] -> (s0 * 4)>()[%workgroup_id_y] | |
%12 = affine.apply affine_map<()[s0] -> (s0 * 4)>()[%workgroup_count_y] | |
%13 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_x] | |
%14 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_x] | |
scf.for %arg0 = %9 to %6 step %10 { | |
%15 = affine.min affine_map<(d0)[s0] -> (-d0 + s0, 64)>(%arg0)[%6] | |
scf.for %arg1 = %11 to %c540 step %12 { | |
%16 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg1) | |
scf.for %arg2 = %13 to %c3200 step %14 { | |
%subview = memref.subview %8[%arg0, %arg1, %arg2, 0, 0] [%15, 4, 64, 16, 1] [1, 1, 1, 1, 1] : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>> to memref<?x4x64x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%subview_0 = memref.subview %7[%16, %arg2] [64, 64] [1, 1] : memref<8640x3200xf16, #hal.descriptor_type<storage_buffer>> to memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
scf.for %arg3 = %c0 to %15 step %c1 { | |
scf.for %arg4 = %c0 to %c4 step %c1 { | |
%17 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg4) | |
scf.for %arg5 = %c0 to %c64 step %c1 { | |
%18 = vector.transfer_read %subview_0[%17, %arg5], %cst {in_bounds = [true, true]} : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>, vector<16x1xf16> | |
%subview_1 = memref.subview %alloca[0, 0, 0] [1, 16, 1] [1, 1, 1] : memref<1x16x1xf16> to memref<1x16xf16, strided<[16, 1]>> | |
%19 = vector.shape_cast %18 : vector<16x1xf16> to vector<16xf16> | |
%subview_2 = memref.subview %subview_1[0, 0] [1, 16] [1, 1] : memref<1x16xf16, strided<[16, 1]>> to memref<16xf16, strided<[1]>> | |
vector.transfer_write %19, %subview_2[%c0] {in_bounds = [true]} : vector<16xf16>, memref<16xf16, strided<[1]>> | |
%subview_3 = memref.subview %subview[%arg3, %arg4, %arg5, 0, 0] [1, 1, 1, 16, 1] [1, 1, 1, 1, 1] : memref<?x4x64x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<1x1x1x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%subview_4 = memref.subview %alloca[0, 0, 0] [1, 16, 1] [1, 1, 1] : memref<1x16x1xf16> to memref<16x1xf16, strided<[1, 1]>> | |
%subview_5 = memref.subview %subview_3[0, 0, 0, 0, 0] [1, 1, 1, 16, 1] [1, 1, 1, 1, 1] : memref<1x1x1x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<16x1xf16, strided<[1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%subview_4 : memref<16x1xf16, strided<[1, 1]>>) outs(%subview_5 : memref<16x1xf16, strided<[1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} | |
} | |
} | |
} | |
} | |
} | |
} | |
return | |
} | |
// -----// IR Dump After LLVMCPUVectorTransferLowering (iree-llvmcpu-vector-transfer-lowering) //----- // | |
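// note: this pass lowers the shaped transfers to primitive ops. The vector.transfer_read of a <16x1xf16> tile is unrolled into 16 scalar memref.load + vector.broadcast + vector.insert ops (one per row of the pack's 16x1 inner tile), the vector.transfer_write becomes a plain vector.store, and the scalar f16 padding constant is replaced by a dense<0.0> vector<16x1xf16> seed for the insert chain.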
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack() attributes {translation_info = #iree_codegen.translation_info<CPUDoubleTilingExpert>} { | |
%cst = arith.constant dense<0.000000e+00> : vector<16x1xf16> | |
%c32_i64 = arith.constant 32 : i64 | |
%c0 = arith.constant 0 : index | |
%c540 = arith.constant 540 : index | |
%c3200 = arith.constant 3200 : index | |
%c1 = arith.constant 1 : index | |
%c4 = arith.constant 4 : index | |
%c64 = arith.constant 64 : index | |
%alloca = memref.alloca() {alignment = 64 : i64} : memref<1x16x1xf16> | |
%0 = hal.interface.constant.load[0] : i32 | |
%1 = hal.interface.constant.load[1] : i32 | |
%2 = arith.extui %0 : i32 to i64 | |
%3 = arith.extui %1 : i32 to i64 | |
%4 = arith.shli %3, %c32_i64 : i64 | |
%5 = arith.ori %2, %4 : i64 | |
%6 = arith.index_castui %5 : i64 to index | |
%7 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<8640x3200xf16, #hal.descriptor_type<storage_buffer>> | |
memref.assume_alignment %7, 64 : memref<8640x3200xf16, #hal.descriptor_type<storage_buffer>> | |
%8 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>>{%6} | |
memref.assume_alignment %8, 64 : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>> | |
%workgroup_id_x = hal.interface.workgroup.id[0] : index | |
%workgroup_count_x = hal.interface.workgroup.count[0] : index | |
%workgroup_id_y = hal.interface.workgroup.id[1] : index | |
%workgroup_count_y = hal.interface.workgroup.count[1] : index | |
%workgroup_id_z = hal.interface.workgroup.id[2] : index | |
%workgroup_count_z = hal.interface.workgroup.count[2] : index | |
%9 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_z] | |
%10 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_z] | |
%11 = affine.apply affine_map<()[s0] -> (s0 * 4)>()[%workgroup_id_y] | |
%12 = affine.apply affine_map<()[s0] -> (s0 * 4)>()[%workgroup_count_y] | |
%13 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_x] | |
%14 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_x] | |
scf.for %arg0 = %9 to %6 step %10 { | |
%15 = affine.min affine_map<(d0)[s0] -> (-d0 + s0, 64)>(%arg0)[%6] | |
scf.for %arg1 = %11 to %c540 step %12 { | |
%16 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg1) | |
scf.for %arg2 = %13 to %c3200 step %14 { | |
%subview = memref.subview %8[%arg0, %arg1, %arg2, 0, 0] [%15, 4, 64, 16, 1] [1, 1, 1, 1, 1] : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>> to memref<?x4x64x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%subview_0 = memref.subview %7[%16, %arg2] [64, 64] [1, 1] : memref<8640x3200xf16, #hal.descriptor_type<storage_buffer>> to memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
scf.for %arg3 = %c0 to %15 step %c1 { | |
scf.for %arg4 = %c0 to %c4 step %c1 { | |
scf.for %arg5 = %c0 to %c64 step %c1 { | |
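// the unrolled sequence below assembles one 16x1 pack tile: element i is loaded from row (%arg4 * 16 + i) of the 64x64 source subview, broadcast to a vector<1xf16>, and inserted at position [i] of the accumulator.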
%17 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg4) | |
%18 = memref.load %subview_0[%17, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%19 = vector.broadcast %18 : f16 to vector<1xf16> | |
%20 = vector.insert %19, %cst [0] : vector<1xf16> into vector<16x1xf16> | |
%21 = affine.apply affine_map<(d0) -> (d0 * 16 + 1)>(%arg4) | |
%22 = memref.load %subview_0[%21, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%23 = vector.broadcast %22 : f16 to vector<1xf16> | |
%24 = vector.insert %23, %20 [1] : vector<1xf16> into vector<16x1xf16> | |
%25 = affine.apply affine_map<(d0) -> (d0 * 16 + 2)>(%arg4) | |
%26 = memref.load %subview_0[%25, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%27 = vector.broadcast %26 : f16 to vector<1xf16> | |
%28 = vector.insert %27, %24 [2] : vector<1xf16> into vector<16x1xf16> | |
%29 = affine.apply affine_map<(d0) -> (d0 * 16 + 3)>(%arg4) | |
%30 = memref.load %subview_0[%29, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%31 = vector.broadcast %30 : f16 to vector<1xf16> | |
%32 = vector.insert %31, %28 [3] : vector<1xf16> into vector<16x1xf16> | |
%33 = affine.apply affine_map<(d0) -> (d0 * 16 + 4)>(%arg4) | |
%34 = memref.load %subview_0[%33, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%35 = vector.broadcast %34 : f16 to vector<1xf16> | |
%36 = vector.insert %35, %32 [4] : vector<1xf16> into vector<16x1xf16> | |
%37 = affine.apply affine_map<(d0) -> (d0 * 16 + 5)>(%arg4) | |
%38 = memref.load %subview_0[%37, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%39 = vector.broadcast %38 : f16 to vector<1xf16> | |
%40 = vector.insert %39, %36 [5] : vector<1xf16> into vector<16x1xf16> | |
%41 = affine.apply affine_map<(d0) -> (d0 * 16 + 6)>(%arg4) | |
%42 = memref.load %subview_0[%41, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%43 = vector.broadcast %42 : f16 to vector<1xf16> | |
%44 = vector.insert %43, %40 [6] : vector<1xf16> into vector<16x1xf16> | |
%45 = affine.apply affine_map<(d0) -> (d0 * 16 + 7)>(%arg4) | |
%46 = memref.load %subview_0[%45, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%47 = vector.broadcast %46 : f16 to vector<1xf16> | |
%48 = vector.insert %47, %44 [7] : vector<1xf16> into vector<16x1xf16> | |
%49 = affine.apply affine_map<(d0) -> (d0 * 16 + 8)>(%arg4) | |
%50 = memref.load %subview_0[%49, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%51 = vector.broadcast %50 : f16 to vector<1xf16> | |
%52 = vector.insert %51, %48 [8] : vector<1xf16> into vector<16x1xf16> | |
%53 = affine.apply affine_map<(d0) -> (d0 * 16 + 9)>(%arg4) | |
%54 = memref.load %subview_0[%53, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%55 = vector.broadcast %54 : f16 to vector<1xf16> | |
%56 = vector.insert %55, %52 [9] : vector<1xf16> into vector<16x1xf16> | |
%57 = affine.apply affine_map<(d0) -> (d0 * 16 + 10)>(%arg4) | |
%58 = memref.load %subview_0[%57, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%59 = vector.broadcast %58 : f16 to vector<1xf16> | |
%60 = vector.insert %59, %56 [10] : vector<1xf16> into vector<16x1xf16> | |
%61 = affine.apply affine_map<(d0) -> (d0 * 16 + 11)>(%arg4) | |
%62 = memref.load %subview_0[%61, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%63 = vector.broadcast %62 : f16 to vector<1xf16> | |
%64 = vector.insert %63, %60 [11] : vector<1xf16> into vector<16x1xf16> | |
%65 = affine.apply affine_map<(d0) -> (d0 * 16 + 12)>(%arg4) | |
%66 = memref.load %subview_0[%65, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%67 = vector.broadcast %66 : f16 to vector<1xf16> | |
%68 = vector.insert %67, %64 [12] : vector<1xf16> into vector<16x1xf16> | |
%69 = affine.apply affine_map<(d0) -> (d0 * 16 + 13)>(%arg4) | |
%70 = memref.load %subview_0[%69, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%71 = vector.broadcast %70 : f16 to vector<1xf16> | |
%72 = vector.insert %71, %68 [13] : vector<1xf16> into vector<16x1xf16> | |
%73 = affine.apply affine_map<(d0) -> (d0 * 16 + 14)>(%arg4) | |
%74 = memref.load %subview_0[%73, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%75 = vector.broadcast %74 : f16 to vector<1xf16> | |
%76 = vector.insert %75, %72 [14] : vector<1xf16> into vector<16x1xf16> | |
%77 = affine.apply affine_map<(d0) -> (d0 * 16 + 15)>(%arg4) | |
%78 = memref.load %subview_0[%77, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%79 = vector.broadcast %78 : f16 to vector<1xf16> | |
%80 = vector.insert %79, %76 [15] : vector<1xf16> into vector<16x1xf16> | |
%subview_1 = memref.subview %alloca[0, 0, 0] [1, 16, 1] [1, 1, 1] : memref<1x16x1xf16> to memref<1x16xf16, strided<[16, 1]>> | |
%81 = vector.shape_cast %80 : vector<16x1xf16> to vector<16xf16> | |
%subview_2 = memref.subview %subview_1[0, 0] [1, 16] [1, 1] : memref<1x16xf16, strided<[16, 1]>> to memref<16xf16, strided<[1]>> | |
vector.store %81, %subview_2[%c0] : memref<16xf16, strided<[1]>>, vector<16xf16> | |
%subview_3 = memref.subview %subview[%arg3, %arg4, %arg5, 0, 0] [1, 1, 1, 16, 1] [1, 1, 1, 1, 1] : memref<?x4x64x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<1x1x1x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%subview_4 = memref.subview %alloca[0, 0, 0] [1, 16, 1] [1, 1, 1] : memref<1x16x1xf16> to memref<16x1xf16, strided<[1, 1]>> | |
%subview_5 = memref.subview %subview_3[0, 0, 0, 0, 0] [1, 1, 1, 16, 1] [1, 1, 1, 1, 1] : memref<1x1x1x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<16x1xf16, strided<[1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%subview_4 : memref<16x1xf16, strided<[1, 1]>>) outs(%subview_5 : memref<16x1xf16, strided<[1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} | |
} | |
} | |
} | |
} | |
} | |
} | |
return | |
} | |
// -----// IR Dump After LLVMCPUVectorTransposeLowering (iree-llvmcpu-vector-transpose-lowering) //----- // | |
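// note: no vector.transpose ops are present in this dispatch, so this dump appears identical to the previous one.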
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack() attributes {translation_info = #iree_codegen.translation_info<CPUDoubleTilingExpert>} { | |
%cst = arith.constant dense<0.000000e+00> : vector<16x1xf16> | |
%c32_i64 = arith.constant 32 : i64 | |
%c0 = arith.constant 0 : index | |
%c540 = arith.constant 540 : index | |
%c3200 = arith.constant 3200 : index | |
%c1 = arith.constant 1 : index | |
%c4 = arith.constant 4 : index | |
%c64 = arith.constant 64 : index | |
%alloca = memref.alloca() {alignment = 64 : i64} : memref<1x16x1xf16> | |
%0 = hal.interface.constant.load[0] : i32 | |
%1 = hal.interface.constant.load[1] : i32 | |
%2 = arith.extui %0 : i32 to i64 | |
%3 = arith.extui %1 : i32 to i64 | |
%4 = arith.shli %3, %c32_i64 : i64 | |
%5 = arith.ori %2, %4 : i64 | |
%6 = arith.index_castui %5 : i64 to index | |
%7 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<8640x3200xf16, #hal.descriptor_type<storage_buffer>> | |
memref.assume_alignment %7, 64 : memref<8640x3200xf16, #hal.descriptor_type<storage_buffer>> | |
%8 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>>{%6} | |
memref.assume_alignment %8, 64 : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>> | |
%workgroup_id_x = hal.interface.workgroup.id[0] : index | |
%workgroup_count_x = hal.interface.workgroup.count[0] : index | |
%workgroup_id_y = hal.interface.workgroup.id[1] : index | |
%workgroup_count_y = hal.interface.workgroup.count[1] : index | |
%workgroup_id_z = hal.interface.workgroup.id[2] : index | |
%workgroup_count_z = hal.interface.workgroup.count[2] : index | |
%9 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_z] | |
%10 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_z] | |
%11 = affine.apply affine_map<()[s0] -> (s0 * 4)>()[%workgroup_id_y] | |
%12 = affine.apply affine_map<()[s0] -> (s0 * 4)>()[%workgroup_count_y] | |
%13 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_x] | |
%14 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_x] | |
scf.for %arg0 = %9 to %6 step %10 { | |
%15 = affine.min affine_map<(d0)[s0] -> (-d0 + s0, 64)>(%arg0)[%6] | |
scf.for %arg1 = %11 to %c540 step %12 { | |
%16 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg1) | |
scf.for %arg2 = %13 to %c3200 step %14 { | |
%subview = memref.subview %8[%arg0, %arg1, %arg2, 0, 0] [%15, 4, 64, 16, 1] [1, 1, 1, 1, 1] : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>> to memref<?x4x64x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%subview_0 = memref.subview %7[%16, %arg2] [64, 64] [1, 1] : memref<8640x3200xf16, #hal.descriptor_type<storage_buffer>> to memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
scf.for %arg3 = %c0 to %15 step %c1 { | |
scf.for %arg4 = %c0 to %c4 step %c1 { | |
scf.for %arg5 = %c0 to %c64 step %c1 { | |
%17 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg4) | |
%18 = memref.load %subview_0[%17, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%19 = vector.broadcast %18 : f16 to vector<1xf16> | |
%20 = vector.insert %19, %cst [0] : vector<1xf16> into vector<16x1xf16> | |
%21 = affine.apply affine_map<(d0) -> (d0 * 16 + 1)>(%arg4) | |
%22 = memref.load %subview_0[%21, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%23 = vector.broadcast %22 : f16 to vector<1xf16> | |
%24 = vector.insert %23, %20 [1] : vector<1xf16> into vector<16x1xf16> | |
%25 = affine.apply affine_map<(d0) -> (d0 * 16 + 2)>(%arg4) | |
%26 = memref.load %subview_0[%25, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%27 = vector.broadcast %26 : f16 to vector<1xf16> | |
%28 = vector.insert %27, %24 [2] : vector<1xf16> into vector<16x1xf16> | |
%29 = affine.apply affine_map<(d0) -> (d0 * 16 + 3)>(%arg4) | |
%30 = memref.load %subview_0[%29, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%31 = vector.broadcast %30 : f16 to vector<1xf16> | |
%32 = vector.insert %31, %28 [3] : vector<1xf16> into vector<16x1xf16> | |
%33 = affine.apply affine_map<(d0) -> (d0 * 16 + 4)>(%arg4) | |
%34 = memref.load %subview_0[%33, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%35 = vector.broadcast %34 : f16 to vector<1xf16> | |
%36 = vector.insert %35, %32 [4] : vector<1xf16> into vector<16x1xf16> | |
%37 = affine.apply affine_map<(d0) -> (d0 * 16 + 5)>(%arg4) | |
%38 = memref.load %subview_0[%37, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%39 = vector.broadcast %38 : f16 to vector<1xf16> | |
%40 = vector.insert %39, %36 [5] : vector<1xf16> into vector<16x1xf16> | |
%41 = affine.apply affine_map<(d0) -> (d0 * 16 + 6)>(%arg4) | |
%42 = memref.load %subview_0[%41, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%43 = vector.broadcast %42 : f16 to vector<1xf16> | |
%44 = vector.insert %43, %40 [6] : vector<1xf16> into vector<16x1xf16> | |
%45 = affine.apply affine_map<(d0) -> (d0 * 16 + 7)>(%arg4) | |
%46 = memref.load %subview_0[%45, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%47 = vector.broadcast %46 : f16 to vector<1xf16> | |
%48 = vector.insert %47, %44 [7] : vector<1xf16> into vector<16x1xf16> | |
%49 = affine.apply affine_map<(d0) -> (d0 * 16 + 8)>(%arg4) | |
%50 = memref.load %subview_0[%49, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%51 = vector.broadcast %50 : f16 to vector<1xf16> | |
%52 = vector.insert %51, %48 [8] : vector<1xf16> into vector<16x1xf16> | |
%53 = affine.apply affine_map<(d0) -> (d0 * 16 + 9)>(%arg4) | |
%54 = memref.load %subview_0[%53, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%55 = vector.broadcast %54 : f16 to vector<1xf16> | |
%56 = vector.insert %55, %52 [9] : vector<1xf16> into vector<16x1xf16> | |
%57 = affine.apply affine_map<(d0) -> (d0 * 16 + 10)>(%arg4) | |
%58 = memref.load %subview_0[%57, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%59 = vector.broadcast %58 : f16 to vector<1xf16> | |
%60 = vector.insert %59, %56 [10] : vector<1xf16> into vector<16x1xf16> | |
%61 = affine.apply affine_map<(d0) -> (d0 * 16 + 11)>(%arg4) | |
%62 = memref.load %subview_0[%61, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%63 = vector.broadcast %62 : f16 to vector<1xf16> | |
%64 = vector.insert %63, %60 [11] : vector<1xf16> into vector<16x1xf16> | |
%65 = affine.apply affine_map<(d0) -> (d0 * 16 + 12)>(%arg4) | |
%66 = memref.load %subview_0[%65, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%67 = vector.broadcast %66 : f16 to vector<1xf16> | |
%68 = vector.insert %67, %64 [12] : vector<1xf16> into vector<16x1xf16> | |
%69 = affine.apply affine_map<(d0) -> (d0 * 16 + 13)>(%arg4) | |
%70 = memref.load %subview_0[%69, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%71 = vector.broadcast %70 : f16 to vector<1xf16> | |
%72 = vector.insert %71, %68 [13] : vector<1xf16> into vector<16x1xf16> | |
%73 = affine.apply affine_map<(d0) -> (d0 * 16 + 14)>(%arg4) | |
%74 = memref.load %subview_0[%73, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%75 = vector.broadcast %74 : f16 to vector<1xf16> | |
%76 = vector.insert %75, %72 [14] : vector<1xf16> into vector<16x1xf16> | |
%77 = affine.apply affine_map<(d0) -> (d0 * 16 + 15)>(%arg4) | |
%78 = memref.load %subview_0[%77, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%79 = vector.broadcast %78 : f16 to vector<1xf16> | |
%80 = vector.insert %79, %76 [15] : vector<1xf16> into vector<16x1xf16> | |
%subview_1 = memref.subview %alloca[0, 0, 0] [1, 16, 1] [1, 1, 1] : memref<1x16x1xf16> to memref<1x16xf16, strided<[16, 1]>> | |
%81 = vector.shape_cast %80 : vector<16x1xf16> to vector<16xf16> | |
%subview_2 = memref.subview %subview_1[0, 0] [1, 16] [1, 1] : memref<1x16xf16, strided<[16, 1]>> to memref<16xf16, strided<[1]>> | |
vector.store %81, %subview_2[%c0] : memref<16xf16, strided<[1]>>, vector<16xf16> | |
%subview_3 = memref.subview %subview[%arg3, %arg4, %arg5, 0, 0] [1, 1, 1, 16, 1] [1, 1, 1, 1, 1] : memref<?x4x64x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<1x1x1x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%subview_4 = memref.subview %alloca[0, 0, 0] [1, 16, 1] [1, 1, 1] : memref<1x16x1xf16> to memref<16x1xf16, strided<[1, 1]>> | |
%subview_5 = memref.subview %subview_3[0, 0, 0, 0, 0] [1, 1, 1, 16, 1] [1, 1, 1, 1, 1] : memref<1x1x1x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<16x1xf16, strided<[1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%subview_4 : memref<16x1xf16, strided<[1, 1]>>) outs(%subview_5 : memref<16x1xf16, strided<[1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} | |
} | |
} | |
} | |
} | |
} | |
} | |
return | |
} | |
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
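// note: another no-op round; the IR below matches the post-transpose-lowering dump unchanged.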
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack() attributes {translation_info = #iree_codegen.translation_info<CPUDoubleTilingExpert>} { | |
%cst = arith.constant dense<0.000000e+00> : vector<16x1xf16> | |
%c32_i64 = arith.constant 32 : i64 | |
%c0 = arith.constant 0 : index | |
%c540 = arith.constant 540 : index | |
%c3200 = arith.constant 3200 : index | |
%c1 = arith.constant 1 : index | |
%c4 = arith.constant 4 : index | |
%c64 = arith.constant 64 : index | |
%alloca = memref.alloca() {alignment = 64 : i64} : memref<1x16x1xf16> | |
%0 = hal.interface.constant.load[0] : i32 | |
%1 = hal.interface.constant.load[1] : i32 | |
%2 = arith.extui %0 : i32 to i64 | |
%3 = arith.extui %1 : i32 to i64 | |
%4 = arith.shli %3, %c32_i64 : i64 | |
%5 = arith.ori %2, %4 : i64 | |
%6 = arith.index_castui %5 : i64 to index | |
%7 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<8640x3200xf16, #hal.descriptor_type<storage_buffer>> | |
memref.assume_alignment %7, 64 : memref<8640x3200xf16, #hal.descriptor_type<storage_buffer>> | |
%8 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>>{%6} | |
memref.assume_alignment %8, 64 : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>> | |
%workgroup_id_x = hal.interface.workgroup.id[0] : index | |
%workgroup_count_x = hal.interface.workgroup.count[0] : index | |
%workgroup_id_y = hal.interface.workgroup.id[1] : index | |
%workgroup_count_y = hal.interface.workgroup.count[1] : index | |
%workgroup_id_z = hal.interface.workgroup.id[2] : index | |
%workgroup_count_z = hal.interface.workgroup.count[2] : index | |
%9 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_z] | |
%10 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_z] | |
%11 = affine.apply affine_map<()[s0] -> (s0 * 4)>()[%workgroup_id_y] | |
%12 = affine.apply affine_map<()[s0] -> (s0 * 4)>()[%workgroup_count_y] | |
%13 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_x] | |
%14 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_x] | |
scf.for %arg0 = %9 to %6 step %10 { | |
%15 = affine.min affine_map<(d0)[s0] -> (-d0 + s0, 64)>(%arg0)[%6] | |
scf.for %arg1 = %11 to %c540 step %12 { | |
%16 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg1) | |
scf.for %arg2 = %13 to %c3200 step %14 { | |
%subview = memref.subview %8[%arg0, %arg1, %arg2, 0, 0] [%15, 4, 64, 16, 1] [1, 1, 1, 1, 1] : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>> to memref<?x4x64x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%subview_0 = memref.subview %7[%16, %arg2] [64, 64] [1, 1] : memref<8640x3200xf16, #hal.descriptor_type<storage_buffer>> to memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
scf.for %arg3 = %c0 to %15 step %c1 { | |
scf.for %arg4 = %c0 to %c4 step %c1 { | |
scf.for %arg5 = %c0 to %c64 step %c1 { | |
%17 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg4) | |
%18 = memref.load %subview_0[%17, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%19 = vector.broadcast %18 : f16 to vector<1xf16> | |
%20 = vector.insert %19, %cst [0] : vector<1xf16> into vector<16x1xf16> | |
%21 = affine.apply affine_map<(d0) -> (d0 * 16 + 1)>(%arg4) | |
%22 = memref.load %subview_0[%21, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%23 = vector.broadcast %22 : f16 to vector<1xf16> | |
%24 = vector.insert %23, %20 [1] : vector<1xf16> into vector<16x1xf16> | |
%25 = affine.apply affine_map<(d0) -> (d0 * 16 + 2)>(%arg4) | |
%26 = memref.load %subview_0[%25, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%27 = vector.broadcast %26 : f16 to vector<1xf16> | |
%28 = vector.insert %27, %24 [2] : vector<1xf16> into vector<16x1xf16> | |
%29 = affine.apply affine_map<(d0) -> (d0 * 16 + 3)>(%arg4) | |
%30 = memref.load %subview_0[%29, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%31 = vector.broadcast %30 : f16 to vector<1xf16> | |
%32 = vector.insert %31, %28 [3] : vector<1xf16> into vector<16x1xf16> | |
%33 = affine.apply affine_map<(d0) -> (d0 * 16 + 4)>(%arg4) | |
%34 = memref.load %subview_0[%33, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%35 = vector.broadcast %34 : f16 to vector<1xf16> | |
%36 = vector.insert %35, %32 [4] : vector<1xf16> into vector<16x1xf16> | |
%37 = affine.apply affine_map<(d0) -> (d0 * 16 + 5)>(%arg4) | |
%38 = memref.load %subview_0[%37, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%39 = vector.broadcast %38 : f16 to vector<1xf16> | |
%40 = vector.insert %39, %36 [5] : vector<1xf16> into vector<16x1xf16> | |
%41 = affine.apply affine_map<(d0) -> (d0 * 16 + 6)>(%arg4) | |
%42 = memref.load %subview_0[%41, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%43 = vector.broadcast %42 : f16 to vector<1xf16> | |
%44 = vector.insert %43, %40 [6] : vector<1xf16> into vector<16x1xf16> | |
%45 = affine.apply affine_map<(d0) -> (d0 * 16 + 7)>(%arg4) | |
%46 = memref.load %subview_0[%45, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%47 = vector.broadcast %46 : f16 to vector<1xf16> | |
%48 = vector.insert %47, %44 [7] : vector<1xf16> into vector<16x1xf16> | |
%49 = affine.apply affine_map<(d0) -> (d0 * 16 + 8)>(%arg4) | |
%50 = memref.load %subview_0[%49, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%51 = vector.broadcast %50 : f16 to vector<1xf16> | |
%52 = vector.insert %51, %48 [8] : vector<1xf16> into vector<16x1xf16> | |
%53 = affine.apply affine_map<(d0) -> (d0 * 16 + 9)>(%arg4) | |
%54 = memref.load %subview_0[%53, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%55 = vector.broadcast %54 : f16 to vector<1xf16> | |
%56 = vector.insert %55, %52 [9] : vector<1xf16> into vector<16x1xf16> | |
%57 = affine.apply affine_map<(d0) -> (d0 * 16 + 10)>(%arg4) | |
%58 = memref.load %subview_0[%57, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%59 = vector.broadcast %58 : f16 to vector<1xf16> | |
%60 = vector.insert %59, %56 [10] : vector<1xf16> into vector<16x1xf16> | |
%61 = affine.apply affine_map<(d0) -> (d0 * 16 + 11)>(%arg4) | |
%62 = memref.load %subview_0[%61, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%63 = vector.broadcast %62 : f16 to vector<1xf16> | |
%64 = vector.insert %63, %60 [11] : vector<1xf16> into vector<16x1xf16> | |
%65 = affine.apply affine_map<(d0) -> (d0 * 16 + 12)>(%arg4) | |
%66 = memref.load %subview_0[%65, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%67 = vector.broadcast %66 : f16 to vector<1xf16> | |
%68 = vector.insert %67, %64 [12] : vector<1xf16> into vector<16x1xf16> | |
%69 = affine.apply affine_map<(d0) -> (d0 * 16 + 13)>(%arg4) | |
%70 = memref.load %subview_0[%69, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%71 = vector.broadcast %70 : f16 to vector<1xf16> | |
%72 = vector.insert %71, %68 [13] : vector<1xf16> into vector<16x1xf16> | |
%73 = affine.apply affine_map<(d0) -> (d0 * 16 + 14)>(%arg4) | |
%74 = memref.load %subview_0[%73, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%75 = vector.broadcast %74 : f16 to vector<1xf16> | |
%76 = vector.insert %75, %72 [14] : vector<1xf16> into vector<16x1xf16> | |
%77 = affine.apply affine_map<(d0) -> (d0 * 16 + 15)>(%arg4) | |
%78 = memref.load %subview_0[%77, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%79 = vector.broadcast %78 : f16 to vector<1xf16> | |
%80 = vector.insert %79, %76 [15] : vector<1xf16> into vector<16x1xf16> | |
%subview_1 = memref.subview %alloca[0, 0, 0] [1, 16, 1] [1, 1, 1] : memref<1x16x1xf16> to memref<1x16xf16, strided<[16, 1]>> | |
%81 = vector.shape_cast %80 : vector<16x1xf16> to vector<16xf16> | |
%subview_2 = memref.subview %subview_1[0, 0] [1, 16] [1, 1] : memref<1x16xf16, strided<[16, 1]>> to memref<16xf16, strided<[1]>> | |
vector.store %81, %subview_2[%c0] : memref<16xf16, strided<[1]>>, vector<16xf16> | |
%subview_3 = memref.subview %subview[%arg3, %arg4, %arg5, 0, 0] [1, 1, 1, 16, 1] [1, 1, 1, 1, 1] : memref<?x4x64x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<1x1x1x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%subview_4 = memref.subview %alloca[0, 0, 0] [1, 16, 1] [1, 1, 1] : memref<1x16x1xf16> to memref<16x1xf16, strided<[1, 1]>> | |
%subview_5 = memref.subview %subview_3[0, 0, 0, 0, 0] [1, 1, 1, 16, 1] [1, 1, 1, 1, 1] : memref<1x1x1x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<16x1xf16, strided<[1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%subview_4 : memref<16x1xf16, strided<[1, 1]>>) outs(%subview_5 : memref<16x1xf16, strided<[1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} | |
} | |
} | |
} | |
} | |
} | |
} | |
return | |
} | |
// -----// IR Dump After LLVMCPUVectorShapeCastLowering (iree-llvmcpu-vector-shape-cast-lowering) //----- // | |
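// note: the vector.shape_cast from <16x1xf16> to <16xf16> is eliminated here: the build chain now inserts each vector<1xf16> directly into a flat vector<16xf16> via vector.insert_strided_slice, and the seed constant correspondingly becomes dense<0.0> : vector<16xf16>.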
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack() attributes {translation_info = #iree_codegen.translation_info<CPUDoubleTilingExpert>} { | |
%cst = arith.constant dense<0.000000e+00> : vector<16xf16> | |
%c32_i64 = arith.constant 32 : i64 | |
%c0 = arith.constant 0 : index | |
%c540 = arith.constant 540 : index | |
%c3200 = arith.constant 3200 : index | |
%c1 = arith.constant 1 : index | |
%c4 = arith.constant 4 : index | |
%c64 = arith.constant 64 : index | |
%alloca = memref.alloca() {alignment = 64 : i64} : memref<1x16x1xf16> | |
%0 = hal.interface.constant.load[0] : i32 | |
%1 = hal.interface.constant.load[1] : i32 | |
%2 = arith.extui %0 : i32 to i64 | |
%3 = arith.extui %1 : i32 to i64 | |
%4 = arith.shli %3, %c32_i64 : i64 | |
%5 = arith.ori %2, %4 : i64 | |
%6 = arith.index_castui %5 : i64 to index | |
%7 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<8640x3200xf16, #hal.descriptor_type<storage_buffer>> | |
memref.assume_alignment %7, 64 : memref<8640x3200xf16, #hal.descriptor_type<storage_buffer>> | |
%8 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>>{%6} | |
memref.assume_alignment %8, 64 : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>> | |
%workgroup_id_x = hal.interface.workgroup.id[0] : index | |
%workgroup_count_x = hal.interface.workgroup.count[0] : index | |
%workgroup_id_y = hal.interface.workgroup.id[1] : index | |
%workgroup_count_y = hal.interface.workgroup.count[1] : index | |
%workgroup_id_z = hal.interface.workgroup.id[2] : index | |
%workgroup_count_z = hal.interface.workgroup.count[2] : index | |
%9 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_z] | |
%10 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_z] | |
%11 = affine.apply affine_map<()[s0] -> (s0 * 4)>()[%workgroup_id_y] | |
%12 = affine.apply affine_map<()[s0] -> (s0 * 4)>()[%workgroup_count_y] | |
%13 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_x] | |
%14 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_x] | |
scf.for %arg0 = %9 to %6 step %10 { | |
%15 = affine.min affine_map<(d0)[s0] -> (-d0 + s0, 64)>(%arg0)[%6] | |
scf.for %arg1 = %11 to %c540 step %12 { | |
%16 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg1) | |
scf.for %arg2 = %13 to %c3200 step %14 { | |
%subview = memref.subview %8[%arg0, %arg1, %arg2, 0, 0] [%15, 4, 64, 16, 1] [1, 1, 1, 1, 1] : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>> to memref<?x4x64x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%subview_0 = memref.subview %7[%16, %arg2] [64, 64] [1, 1] : memref<8640x3200xf16, #hal.descriptor_type<storage_buffer>> to memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
scf.for %arg3 = %c0 to %15 step %c1 { | |
scf.for %arg4 = %c0 to %c4 step %c1 { | |
scf.for %arg5 = %c0 to %c64 step %c1 { | |
%17 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg4) | |
%18 = memref.load %subview_0[%17, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%19 = vector.broadcast %18 : f16 to vector<1xf16> | |
%20 = affine.apply affine_map<(d0) -> (d0 * 16 + 1)>(%arg4) | |
%21 = memref.load %subview_0[%20, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%22 = vector.broadcast %21 : f16 to vector<1xf16> | |
%23 = affine.apply affine_map<(d0) -> (d0 * 16 + 2)>(%arg4) | |
%24 = memref.load %subview_0[%23, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%25 = vector.broadcast %24 : f16 to vector<1xf16> | |
%26 = affine.apply affine_map<(d0) -> (d0 * 16 + 3)>(%arg4) | |
%27 = memref.load %subview_0[%26, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%28 = vector.broadcast %27 : f16 to vector<1xf16> | |
%29 = affine.apply affine_map<(d0) -> (d0 * 16 + 4)>(%arg4) | |
%30 = memref.load %subview_0[%29, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%31 = vector.broadcast %30 : f16 to vector<1xf16> | |
%32 = affine.apply affine_map<(d0) -> (d0 * 16 + 5)>(%arg4) | |
%33 = memref.load %subview_0[%32, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%34 = vector.broadcast %33 : f16 to vector<1xf16> | |
%35 = affine.apply affine_map<(d0) -> (d0 * 16 + 6)>(%arg4) | |
%36 = memref.load %subview_0[%35, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%37 = vector.broadcast %36 : f16 to vector<1xf16> | |
%38 = affine.apply affine_map<(d0) -> (d0 * 16 + 7)>(%arg4) | |
%39 = memref.load %subview_0[%38, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%40 = vector.broadcast %39 : f16 to vector<1xf16> | |
%41 = affine.apply affine_map<(d0) -> (d0 * 16 + 8)>(%arg4) | |
%42 = memref.load %subview_0[%41, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%43 = vector.broadcast %42 : f16 to vector<1xf16> | |
%44 = affine.apply affine_map<(d0) -> (d0 * 16 + 9)>(%arg4) | |
%45 = memref.load %subview_0[%44, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%46 = vector.broadcast %45 : f16 to vector<1xf16> | |
%47 = affine.apply affine_map<(d0) -> (d0 * 16 + 10)>(%arg4) | |
%48 = memref.load %subview_0[%47, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%49 = vector.broadcast %48 : f16 to vector<1xf16> | |
%50 = affine.apply affine_map<(d0) -> (d0 * 16 + 11)>(%arg4) | |
%51 = memref.load %subview_0[%50, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%52 = vector.broadcast %51 : f16 to vector<1xf16> | |
%53 = affine.apply affine_map<(d0) -> (d0 * 16 + 12)>(%arg4) | |
%54 = memref.load %subview_0[%53, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%55 = vector.broadcast %54 : f16 to vector<1xf16> | |
%56 = affine.apply affine_map<(d0) -> (d0 * 16 + 13)>(%arg4) | |
%57 = memref.load %subview_0[%56, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%58 = vector.broadcast %57 : f16 to vector<1xf16> | |
%59 = affine.apply affine_map<(d0) -> (d0 * 16 + 14)>(%arg4) | |
%60 = memref.load %subview_0[%59, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%61 = vector.broadcast %60 : f16 to vector<1xf16> | |
%62 = affine.apply affine_map<(d0) -> (d0 * 16 + 15)>(%arg4) | |
%63 = memref.load %subview_0[%62, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%64 = vector.broadcast %63 : f16 to vector<1xf16> | |
%subview_1 = memref.subview %alloca[0, 0, 0] [1, 16, 1] [1, 1, 1] : memref<1x16x1xf16> to memref<1x16xf16, strided<[16, 1]>> | |
%65 = vector.insert_strided_slice %19, %cst {offsets = [0], strides = [1]} : vector<1xf16> into vector<16xf16> | |
%66 = vector.insert_strided_slice %22, %65 {offsets = [1], strides = [1]} : vector<1xf16> into vector<16xf16> | |
%67 = vector.insert_strided_slice %25, %66 {offsets = [2], strides = [1]} : vector<1xf16> into vector<16xf16> | |
%68 = vector.insert_strided_slice %28, %67 {offsets = [3], strides = [1]} : vector<1xf16> into vector<16xf16> | |
%69 = vector.insert_strided_slice %31, %68 {offsets = [4], strides = [1]} : vector<1xf16> into vector<16xf16> | |
%70 = vector.insert_strided_slice %34, %69 {offsets = [5], strides = [1]} : vector<1xf16> into vector<16xf16> | |
%71 = vector.insert_strided_slice %37, %70 {offsets = [6], strides = [1]} : vector<1xf16> into vector<16xf16> | |
%72 = vector.insert_strided_slice %40, %71 {offsets = [7], strides = [1]} : vector<1xf16> into vector<16xf16> | |
%73 = vector.insert_strided_slice %43, %72 {offsets = [8], strides = [1]} : vector<1xf16> into vector<16xf16> | |
%74 = vector.insert_strided_slice %46, %73 {offsets = [9], strides = [1]} : vector<1xf16> into vector<16xf16> | |
%75 = vector.insert_strided_slice %49, %74 {offsets = [10], strides = [1]} : vector<1xf16> into vector<16xf16> | |
%76 = vector.insert_strided_slice %52, %75 {offsets = [11], strides = [1]} : vector<1xf16> into vector<16xf16> | |
%77 = vector.insert_strided_slice %55, %76 {offsets = [12], strides = [1]} : vector<1xf16> into vector<16xf16> | |
%78 = vector.insert_strided_slice %58, %77 {offsets = [13], strides = [1]} : vector<1xf16> into vector<16xf16> | |
%79 = vector.insert_strided_slice %61, %78 {offsets = [14], strides = [1]} : vector<1xf16> into vector<16xf16> | |
%80 = vector.insert_strided_slice %64, %79 {offsets = [15], strides = [1]} : vector<1xf16> into vector<16xf16> | |
%subview_2 = memref.subview %subview_1[0, 0] [1, 16] [1, 1] : memref<1x16xf16, strided<[16, 1]>> to memref<16xf16, strided<[1]>> | |
vector.store %80, %subview_2[%c0] : memref<16xf16, strided<[1]>>, vector<16xf16> | |
%subview_3 = memref.subview %subview[%arg3, %arg4, %arg5, 0, 0] [1, 1, 1, 16, 1] [1, 1, 1, 1, 1] : memref<?x4x64x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<1x1x1x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%subview_4 = memref.subview %alloca[0, 0, 0] [1, 16, 1] [1, 1, 1] : memref<1x16x1xf16> to memref<16x1xf16, strided<[1, 1]>> | |
%subview_5 = memref.subview %subview_3[0, 0, 0, 0, 0] [1, 1, 1, 16, 1] [1, 1, 1, 1, 1] : memref<1x1x1x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<16x1xf16, strided<[1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%subview_4 : memref<16x1xf16, strided<[1, 1]>>) outs(%subview_5 : memref<16x1xf16, strided<[1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} | |
} | |
} | |
} | |
} | |
} | |
} | |
return | |
} | |
// -----// IR Dump After LLVMCPULowerExecutableTarget (iree-llvmcpu-lower-executable-target) //----- // | |
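// note: this is the wrapper pass that ran the CPU lowering pipeline above; the IR itself appears unchanged from the previous dump.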
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack() attributes {translation_info = #iree_codegen.translation_info<CPUDoubleTilingExpert>} { | |
%cst = arith.constant dense<0.000000e+00> : vector<16xf16> | |
%c32_i64 = arith.constant 32 : i64 | |
%c0 = arith.constant 0 : index | |
%c540 = arith.constant 540 : index | |
%c3200 = arith.constant 3200 : index | |
%c1 = arith.constant 1 : index | |
%c4 = arith.constant 4 : index | |
%c64 = arith.constant 64 : index | |
%alloca = memref.alloca() {alignment = 64 : i64} : memref<1x16x1xf16> | |
%0 = hal.interface.constant.load[0] : i32 | |
%1 = hal.interface.constant.load[1] : i32 | |
%2 = arith.extui %0 : i32 to i64 | |
%3 = arith.extui %1 : i32 to i64 | |
%4 = arith.shli %3, %c32_i64 : i64 | |
%5 = arith.ori %2, %4 : i64 | |
%6 = arith.index_castui %5 : i64 to index | |
%7 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<8640x3200xf16, #hal.descriptor_type<storage_buffer>> | |
memref.assume_alignment %7, 64 : memref<8640x3200xf16, #hal.descriptor_type<storage_buffer>> | |
%8 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>>{%6} | |
memref.assume_alignment %8, 64 : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>> | |
%workgroup_id_x = hal.interface.workgroup.id[0] : index | |
%workgroup_count_x = hal.interface.workgroup.count[0] : index | |
%workgroup_id_y = hal.interface.workgroup.id[1] : index | |
%workgroup_count_y = hal.interface.workgroup.count[1] : index | |
%workgroup_id_z = hal.interface.workgroup.id[2] : index | |
%workgroup_count_z = hal.interface.workgroup.count[2] : index | |
%9 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_z] | |
%10 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_z] | |
%11 = affine.apply affine_map<()[s0] -> (s0 * 4)>()[%workgroup_id_y] | |
%12 = affine.apply affine_map<()[s0] -> (s0 * 4)>()[%workgroup_count_y] | |
%13 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_x] | |
%14 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_x] | |
scf.for %arg0 = %9 to %6 step %10 { | |
%15 = affine.min affine_map<(d0)[s0] -> (-d0 + s0, 64)>(%arg0)[%6] | |
scf.for %arg1 = %11 to %c540 step %12 { | |
%16 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg1) | |
scf.for %arg2 = %13 to %c3200 step %14 { | |
%subview = memref.subview %8[%arg0, %arg1, %arg2, 0, 0] [%15, 4, 64, 16, 1] [1, 1, 1, 1, 1] : memref<?x540x3200x16x1xf16, #hal.descriptor_type<storage_buffer>> to memref<?x4x64x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%subview_0 = memref.subview %7[%16, %arg2] [64, 64] [1, 1] : memref<8640x3200xf16, #hal.descriptor_type<storage_buffer>> to memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
scf.for %arg3 = %c0 to %15 step %c1 { | |
scf.for %arg4 = %c0 to %c4 step %c1 { | |
scf.for %arg5 = %c0 to %c64 step %c1 { | |
%17 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg4) | |
%18 = memref.load %subview_0[%17, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%19 = vector.broadcast %18 : f16 to vector<1xf16> | |
%20 = affine.apply affine_map<(d0) -> (d0 * 16 + 1)>(%arg4) | |
%21 = memref.load %subview_0[%20, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%22 = vector.broadcast %21 : f16 to vector<1xf16> | |
%23 = affine.apply affine_map<(d0) -> (d0 * 16 + 2)>(%arg4) | |
%24 = memref.load %subview_0[%23, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%25 = vector.broadcast %24 : f16 to vector<1xf16> | |
%26 = affine.apply affine_map<(d0) -> (d0 * 16 + 3)>(%arg4) | |
%27 = memref.load %subview_0[%26, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%28 = vector.broadcast %27 : f16 to vector<1xf16> | |
%29 = affine.apply affine_map<(d0) -> (d0 * 16 + 4)>(%arg4) | |
%30 = memref.load %subview_0[%29, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%31 = vector.broadcast %30 : f16 to vector<1xf16> | |
%32 = affine.apply affine_map<(d0) -> (d0 * 16 + 5)>(%arg4) | |
%33 = memref.load %subview_0[%32, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%34 = vector.broadcast %33 : f16 to vector<1xf16> | |
%35 = affine.apply affine_map<(d0) -> (d0 * 16 + 6)>(%arg4) | |
%36 = memref.load %subview_0[%35, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%37 = vector.broadcast %36 : f16 to vector<1xf16> | |
%38 = affine.apply affine_map<(d0) -> (d0 * 16 + 7)>(%arg4) | |
%39 = memref.load %subview_0[%38, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%40 = vector.broadcast %39 : f16 to vector<1xf16> | |
%41 = affine.apply affine_map<(d0) -> (d0 * 16 + 8)>(%arg4) | |
%42 = memref.load %subview_0[%41, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%43 = vector.broadcast %42 : f16 to vector<1xf16> | |
%44 = affine.apply affine_map<(d0) -> (d0 * 16 + 9)>(%arg4) | |
%45 = memref.load %subview_0[%44, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%46 = vector.broadcast %45 : f16 to vector<1xf16> | |
%47 = affine.apply affine_map<(d0) -> (d0 * 16 + 10)>(%arg4) | |
%48 = memref.load %subview_0[%47, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%49 = vector.broadcast %48 : f16 to vector<1xf16> | |
%50 = affine.apply affine_map<(d0) -> (d0 * 16 + 11)>(%arg4) | |
%51 = memref.load %subview_0[%50, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%52 = vector.broadcast %51 : f16 to vector<1xf16> | |
%53 = affine.apply affine_map<(d0) -> (d0 * 16 + 12)>(%arg4) | |
%54 = memref.load %subview_0[%53, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%55 = vector.broadcast %54 : f16 to vector<1xf16> | |
%56 = affine.apply affine_map<(d0) -> (d0 * 16 + 13)>(%arg4) | |
%57 = memref.load %subview_0[%56, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%58 = vector.broadcast %57 : f16 to vector<1xf16> | |
%59 = affine.apply affine_map<(d0) -> (d0 * 16 + 14)>(%arg4) | |
%60 = memref.load %subview_0[%59, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%61 = vector.broadcast %60 : f16 to vector<1xf16> | |
%62 = affine.apply affine_map<(d0) -> (d0 * 16 + 15)>(%arg4) | |
%63 = memref.load %subview_0[%62, %arg5] : memref<64x64xf16, strided<[3200, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%64 = vector.broadcast %63 : f16 to vector<1xf16> | |
%subview_1 = memref.subview %alloca[0, 0, 0] [1, 16, 1] [1, 1, 1] : memref<1x16x1xf16> to memref<1x16xf16, strided<[16, 1]>> | |
%65 = vector.insert_strided_slice %19, %cst {offsets = [0], strides = [1]} : vector<1xf16> into vector<16xf16> | |
%66 = vector.insert_strided_slice %22, %65 {offsets = [1], strides = [1]} : vector<1xf16> into vector<16xf16> | |
%67 = vector.insert_strided_slice %25, %66 {offsets = [2], strides = [1]} : vector<1xf16> into vector<16xf16> | |
%68 = vector.insert_strided_slice %28, %67 {offsets = [3], strides = [1]} : vector<1xf16> into vector<16xf16> | |
%69 = vector.insert_strided_slice %31, %68 {offsets = [4], strides = [1]} : vector<1xf16> into vector<16xf16> | |
%70 = vector.insert_strided_slice %34, %69 {offsets = [5], strides = [1]} : vector<1xf16> into vector<16xf16> | |
%71 = vector.insert_strided_slice %37, %70 {offsets = [6], strides = [1]} : vector<1xf16> into vector<16xf16> | |
%72 = vector.insert_strided_slice %40, %71 {offsets = [7], strides = [1]} : vector<1xf16> into vector<16xf16> | |
%73 = vector.insert_strided_slice %43, %72 {offsets = [8], strides = [1]} : vector<1xf16> into vector<16xf16> | |
%74 = vector.insert_strided_slice %46, %73 {offsets = [9], strides = [1]} : vector<1xf16> into vector<16xf16> | |
%75 = vector.insert_strided_slice %49, %74 {offsets = [10], strides = [1]} : vector<1xf16> into vector<16xf16> | |
%76 = vector.insert_strided_slice %52, %75 {offsets = [11], strides = [1]} : vector<1xf16> into vector<16xf16> | |
%77 = vector.insert_strided_slice %55, %76 {offsets = [12], strides = [1]} : vector<1xf16> into vector<16xf16> | |
%78 = vector.insert_strided_slice %58, %77 {offsets = [13], strides = [1]} : vector<1xf16> into vector<16xf16> | |
%79 = vector.insert_strided_slice %61, %78 {offsets = [14], strides = [1]} : vector<1xf16> into vector<16xf16> | |
%80 = vector.insert_strided_slice %64, %79 {offsets = [15], strides = [1]} : vector<1xf16> into vector<16xf16> | |
%subview_2 = memref.subview %subview_1[0, 0] [1, 16] [1, 1] : memref<1x16xf16, strided<[16, 1]>> to memref<16xf16, strided<[1]>> | |
vector.store %80, %subview_2[%c0] : memref<16xf16, strided<[1]>>, vector<16xf16> | |
%subview_3 = memref.subview %subview[%arg3, %arg4, %arg5, 0, 0] [1, 1, 1, 16, 1] [1, 1, 1, 1, 1] : memref<?x4x64x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<1x1x1x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
%subview_4 = memref.subview %alloca[0, 0, 0] [1, 16, 1] [1, 1, 1] : memref<1x16x1xf16> to memref<16x1xf16, strided<[1, 1]>> | |
%subview_5 = memref.subview %subview_3[0, 0, 0, 0, 0] [1, 1, 1, 16, 1] [1, 1, 1, 1, 1] : memref<1x1x1x16x1xf16, strided<[27648000, 51200, 16, 1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<16x1xf16, strided<[1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> | |
linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%subview_4 : memref<16x1xf16, strided<[1, 1]>>) outs(%subview_5 : memref<16x1xf16, strided<[1, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} | |
} | |
} | |
} | |
} | |
} | |
} | |
return | |
} | |
// -----// IR Dump After EraseHALDescriptorTypeFromMemRef (iree-codegen-erase-hal-descriptor-type-from-memref) //----- // | |
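// note: the #hal.descriptor_type<storage_buffer> memory-space annotations are stripped from all memref types (visible on the two binding subspans below), leaving plain memrefs, presumably in preparation for conversion to the LLVM dialect.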
func.func @broadcast_pack_kernel_dispatch_0_broadcast_Dx8640x3200_f16_pack() attributes {translation_info = #iree_codegen.translation_info<CPUDoubleTilingExpert>} { | |
%cst = arith.constant dense<0.000000e+00> : vector<16xf16> | |
%c32_i64 = arith.constant 32 : i64 | |
%c0 = arith.constant 0 : index | |
%c540 = arith.constant 540 : index | |
%c3200 = arith.constant 3200 : index | |
%c1 = arith.constant 1 : index | |
%c4 = arith.constant 4 : index | |
%c64 = arith.constant 64 : index | |
%alloca = memref.alloca() {alignment = 64 : i64} : memref<1x16x1xf16> | |
%0 = hal.interface.constant.load[0] : i32 | |
%1 = hal.interface.constant.load[1] : i32 | |
%2 = arith.extui %0 : i32 to i64 | |
%3 = arith.extui %1 : i32 to i64 | |
%4 = arith.shli %3, %c32_i64 : i64 | |
%5 = arith.ori %2, %4 : i64 | |
%6 = arith.index_castui %5 : i64 to index | |
%7 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<8640x3200xf16> | |
memref.assume_alignment %7, 64 : memref<8640x3200xf16> | |
%8 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) : memref<?x540x3200x16x1xf16>{%6} | |
memref.assume_alignment %8, 64 : memref<?x540x3200x16x1xf16> | |
%workgroup_id_x = hal.interface.workgroup.id[0] : index | |
%workgroup_count_x = hal.interface.workgroup.count[0] : index | |
%workgroup_id_y = hal.interface.workgroup.id[1] : index | |
%workgroup_count_y = hal.interface.workgroup.count[1] : index | |
%workgroup_id_z = hal.interface.workgroup.id[2] : index | |
%workgroup_count_z = hal.interface.workgroup.count[2] : index | |
%9 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_z] | |
%10 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_z] | |
%11 = affine.apply affine_map<()[s0] -> (s0 * 4)>()[%workgroup_id_y] | |
%12 = affine.apply affine_map<()[s0] -> (s0 * 4)>()[%workgroup_count_y] | |
    %13 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_x]